 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs_opreg.h>
#include <sys/taskq.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include "fs/fs_subr.h"
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
/*
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 *  (1)	A check must be made in each zfs thread for a mounted file system.
 *	This is done avoiding races using ZFS_ENTER(zsb).
 *	A ZFS_EXIT(zsb) is needed before all returns.  Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *	can return EIO from the calling function.
 *
 *  (2)	iput() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT().  This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory.  Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call iput() within a tx then use iput_async().
 *
 *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4)	Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
 *	This is critical because we don't want to block while holding locks.
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing to
 *	use a non-blocking assign can deadlock the system.  The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zsb->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again.
 *
 *  (5)	If the operation succeeded, generate the intent log entry for it
 *	before dropping locks.  This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 *  (6)	At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7)	After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zsb);			// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may igrab())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);	// try to assign
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		iput(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zsb);		// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	iput(...);			// release held vnodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	ZFS_EXIT(zsb);			// finished in zfs
 *	return (error);			// done, report error
 */
#if defined(_KERNEL) && defined(HAVE_MMAP)
/*
 * Utility functions to map and unmap a single physical page.  These
 * are used to manage the mappable copies of ZFS file data, and therefore
 * do not update ref/mod bits.
 */
caddr_t
zfs_map_page(page_t *pp, enum seg_rw rw)
{
	if (kpm_enable)
		return (hat_kpm_mapin(pp, 0));
	ASSERT(rw == S_READ || rw == S_WRITE);
	return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0),
	    (caddr_t)-1));
}

void
zfs_unmap_page(page_t *pp, caddr_t addr)
{
	if (kpm_enable) {
		hat_kpm_mapout(pp, 0, addr);
	} else {
		ppmapout(addr);
	}
}
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Write:	If we find a memory mapped page, we write to *both*
 *		the page and the dmu buffer.
 */
static void
update_pages(struct inode *ip, int64_t start, int len, objset_t *os,
    uint64_t oid)
{
	page_t *pp;
	int64_t off;
	caddr_t va;

	off = start & PAGEOFFSET;
	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
		uint64_t nbytes = MIN(PAGESIZE - off, len);

		if (pp = page_lookup(ip, start, SE_SHARED)) {
			va = zfs_map_page(pp, S_WRITE);
			(void) dmu_read(os, oid, start+off, nbytes, va+off,
			    DMU_READ_PREFETCH);
			zfs_unmap_page(pp, va);
			page_unlock(pp);
		}
		len -= nbytes;
		off = 0;
	}
}
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		else we default from the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *	the file is memory mapped.
 */
static int
mappedread(struct inode *ip, int nbytes, uio_t *uio)
{
	znode_t *zp = ITOZ(ip);
	objset_t *os = ITOZSB(ip)->z_os;
	int64_t start, off;
	int len = nbytes;
	int error = 0;
	page_t *pp;
	caddr_t va;

	start = uio->uio_loffset;
	off = start & PAGEOFFSET;
	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
		uint64_t bytes = MIN(PAGESIZE - off, len);

		if (pp = page_lookup(ip, start, SE_SHARED)) {
			va = zfs_map_page(pp, S_READ);
			error = uiomove(va + off, bytes, UIO_READ, uio);
			zfs_unmap_page(pp, va);
			page_unlock(pp);
		} else {
			error = dmu_read_uio(os, zp->z_id, uio, bytes);
		}
		len -= bytes;
		off = 0;
		if (error)
			break;
	}
	return (error);
}
#endif /* _KERNEL && HAVE_MMAP */
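/*
 * Note: zfs_read() below issues its transfers in chunks of at most
 * zfs_read_chunk_size bytes per dmu_read_uio()/mappedread() call; the read
 * loop keeps each chunk aligned on a zfs_read_chunk_size boundary via
 * P2PHASE, so chunk boundaries stay fixed regardless of starting offset.
 */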
offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	ip	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- SYNC flags; used to provide FRSYNC semantics.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
int
zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);
	objset_t	*os;
	ssize_t		n, nbytes;
	int		error = 0;
	rl_t		*rl;
#ifdef HAVE_UIO_ZEROCOPY
	xuio_t		*xuio = NULL;
#endif /* HAVE_UIO_ZEROCOPY */

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);
	os = zsb->z_os;
	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		ZFS_EXIT(zsb);
		return (EACCES);
	}

	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {
		ZFS_EXIT(zsb);
		return (0);
	}

#ifdef HAVE_MANDLOCKS
	/*
	 * Check for mandatory locks
	 */
	if (MANDMODE(zp->z_mode)) {
		if (error = chklock(ip, FREAD,
		    uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
			ZFS_EXIT(zsb);
			return (error);
		}
	}
#endif /* HAVE_MANDLOCKS */

	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 */
	if (ioflag & FRSYNC || zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zsb->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);
	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(uio->uio_loffset < zp->z_size);
	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
#ifdef HAVE_UIO_ZEROCOPY
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
		int nblk;
		int blksz = zp->z_blksz;
		uint64_t offset = uio->uio_loffset;

		xuio = (xuio_t *)uio;
		if (ISP2(blksz)) {
			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
			    blksz)) / blksz;
		} else {
			ASSERT(offset + n <= blksz);
			nblk = 1;
		}
		(void) dmu_xuio_init(xuio, nblk);

		if (vn_has_cached_data(ip)) {
			/*
			 * For simplicity, we always allocate a full buffer
			 * even if we only expect to read a portion of a block.
			 */
			while (--nblk >= 0) {
				(void) dmu_xuio_add(xuio,
				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
				    blksz), 0, blksz);
			}
		}
	}
#endif /* HAVE_UIO_ZEROCOPY */
	while (n > 0) {
		nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

		/* XXX: Drop this, ARC update handled by zpl layer */
#ifdef HAVE_MMAP
		if (vn_has_cached_data(ip))
			error = mappedread(ip, nbytes, uio);
		else
			error = dmu_read_uio(os, zp->z_id, uio, nbytes);
#else
		error = dmu_read_uio(os, zp->z_id, uio, nbytes);
#endif /* HAVE_MMAP */
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}

		n -= nbytes;
	}
out:
	zfs_range_unlock(rl);

	ZFS_ACCESSTIME_STAMP(zsb, zp);
	zfs_inode_update(zp);
	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_read);
/*
 * Write the bytes to a file.
 *
 *	IN:	ip	- inode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- FAPPEND flag set if in append mode.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */
int
zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	rlim64_t	limit = uio->uio_limit;
	ssize_t		start_resid = uio->uio_resid;
	ssize_t		tx_bytes;
	uint64_t	end_size;
	dmu_tx_t	*tx;
	zfs_sb_t	*zsb = ZTOZSB(zp);
	zilog_t		*zilog;
	offset_t	woff;
	ssize_t		n, nbytes;
	rl_t		*rl;
	int		max_blksz = zsb->z_max_blksz;
	int		error = 0;
	arc_buf_t	*abuf;
	iovec_t		*aiov = NULL;
	xuio_t		*xuio = NULL;
	int		i_iov = 0;
	iovec_t		*iovp = uio->uio_iov;
	int		write_eof;
	int		count = 0;
	sa_bulk_attr_t	bulk[4];
	uint64_t	mtime[2], ctime[2];
	ASSERTV(int	iovcnt = uio->uio_iovcnt);
	/*
	 * Fasttrack empty write
	 */
	n = start_resid;
	if (n == 0)
		return (0);

	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, 8);
	/*
	 * If immutable or not appending then return EPERM
	 */
	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
	    (uio->uio_loffset < zp->z_size))) {
		ZFS_EXIT(zsb);
		return (EPERM);
	}

	zilog = zsb->z_log;

	/*
	 * Validate file offset
	 */
	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
	if (woff < 0) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}
#ifdef HAVE_MANDLOCKS
	/*
	 * Check for mandatory locks before calling zfs_range_lock()
	 * in order to prevent a deadlock with locks set via fcntl().
	 */
	if (MANDMODE((mode_t)zp->z_mode) &&
	    (error = chklock(ip, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
		ZFS_EXIT(zsb);
		return (error);
	}
#endif /* HAVE_MANDLOCKS */

#ifdef HAVE_UIO_ZEROCOPY
	/*
	 * Pre-fault the pages to ensure slow (eg NFS) pages
	 * don't hold up the transaction.
	 * Skip this if uio contains loaned arc_buf.
	 */
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
		xuio = (xuio_t *)uio;
	else
		uio_prefaultpages(MIN(n, max_blksz), uio);
#endif /* HAVE_UIO_ZEROCOPY */
	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	if (ioflag & FAPPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics.  We reset the write offset once we have the lock.
		 */
		rl = zfs_range_lock(zp, 0, n, RL_APPEND);
		woff = rl->r_off;
		if (rl->r_len == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		uio->uio_loffset = woff;
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		rl = zfs_range_lock(zp, woff, n, RL_WRITER);
	}

	if (woff >= limit) {
		zfs_range_unlock(rl);
		ZFS_EXIT(zsb);
		return (EFBIG);
	}
	if ((woff + n) > limit || woff > (limit - n))
		n = limit - woff;

	/* Will this write extend the file length? */
	write_eof = (woff + n > zp->z_size);

	end_size = MAX(zp->z_size, woff + n);
	/*
	 * Write the file in reasonable size chunks.  Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (n > 0) {
		abuf = NULL;
		woff = uio->uio_loffset;
again:
		if (zfs_owner_overquota(zsb, zp, B_FALSE) ||
		    zfs_owner_overquota(zsb, zp, B_TRUE)) {
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			error = EDQUOT;
			break;
		}
&& abuf
== NULL
) {
596 ASSERT(i_iov
< iovcnt
);
598 abuf
= dmu_xuio_arcbuf(xuio
, i_iov
);
599 dmu_xuio_clear(xuio
, i_iov
);
600 ASSERT((aiov
->iov_base
== abuf
->b_data
) ||
601 ((char *)aiov
->iov_base
- (char *)abuf
->b_data
+
602 aiov
->iov_len
== arc_buf_size(abuf
)));
604 } else if (abuf
== NULL
&& n
>= max_blksz
&&
605 woff
>= zp
->z_size
&&
606 P2PHASE(woff
, max_blksz
) == 0 &&
607 zp
->z_blksz
== max_blksz
) {
609 * This write covers a full block. "Borrow" a buffer
610 * from the dmu so that we can fill it before we enter
611 * a transaction. This avoids the possibility of
612 * holding up the transaction if the data copy hangs
613 * up on a pagefault (e.g., from an NFS server mapping).
617 abuf
= dmu_request_arcbuf(sa_get_db(zp
->z_sa_hdl
),
619 ASSERT(abuf
!= NULL
);
620 ASSERT(arc_buf_size(abuf
) == max_blksz
);
621 if ((error
= uiocopy(abuf
->b_data
, max_blksz
,
622 UIO_WRITE
, uio
, &cbytes
))) {
623 dmu_return_arcbuf(abuf
);
626 ASSERT(cbytes
== max_blksz
);
		/*
		 * Start a transaction.
		 */
		tx = dmu_tx_create(zsb->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_NOWAIT);
		if (error) {
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto again;
			}
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}
		/*
		 * If zfs_range_lock() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since zfs_range_reduce() will
		 * shrink down r_len to the appropriate size.
		 */
		if (rl->r_len == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_range_reduce(rl, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			tx_bytes -= uio->uio_resid;
		} else {
			tx_bytes = nbytes;
			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf().  Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {
				ASSERT(xuio);
				dmu_write(zsb->z_os, zp->z_id, woff,
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();
			} else {
				ASSERT(xuio || tx_bytes == max_blksz);
				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
				    woff, abuf, tx);
			}
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);
		}

		/* XXX: Drop this, ARC update handled by zpl layer */
#ifdef HAVE_MMAP
		if (tx_bytes && vn_has_cached_data(ip)) {
			update_pages(ip, woff,
			    tx_bytes, zsb->z_os, zp->z_id);
		}
#endif /* HAVE_MMAP */
		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}
		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(cr,
		    (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
			uint64_t newmode;

			zp->z_mode &= ~(S_ISUID | S_ISGID);
			newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zsb),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
		    B_TRUE);
		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    uio->uio_loffset);
			ASSERT(error == 0);
		}
		/*
		 * If we are replaying and eof is non-zero then force
		 * the file size to the specified eof.  Note, there's no
		 * concurrency during replay.
		 */
		if (zsb->z_replay && zsb->z_replay_eof != 0)
			zp->z_size = zsb->z_replay_eof;
		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT(tx_bytes == nbytes);
		n -= nbytes;

		if (!xuio && n > 0)
			uio_prefaultpages(MIN(n, max_blksz), uio);
	}
	zfs_range_unlock(rl);

	/*
	 * If we're in replay mode, or we made no progress, return error.
	 * Otherwise, it's at least a partial write, so it's successful.
	 */
	if (zsb->z_replay || uio->uio_resid == start_resid) {
		ZFS_EXIT(zsb);
		return (error);
	}

	if (ioflag & (FSYNC | FDSYNC) ||
	    zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	zfs_inode_update(zp);
	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_write);
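/*
 * Drop an inode reference without risking a synchronous trip through
 * zfs_zinactive(): if ours would be the last reference, hand the final
 * iput() off to the given taskq instead of calling it directly.  Used
 * below where the txg is held open and a direct iput() could deadlock
 * (see Big Rule (2) above).
 */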
static void
iput_async(struct inode *ip, taskq_t *taskq)
{
	ASSERT(atomic_read(&ip->i_count) > 0);
	if (atomic_read(&ip->i_count) == 1)
		taskq_dispatch(taskq, (task_func_t *)iput, ip, TQ_SLEEP);
	else
		iput(ip);
}
void
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;
	objset_t *os = ZTOZSB(zp)->z_os;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}
#ifdef DEBUG
static int zil_fault_io = 0;
#endif
/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zfs_sb_t *zsb = arg;
	objset_t *os = zsb->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zsb, object, &zp) != 0)
		return (ENOENT);
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));
		return (ENOENT);
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zsb->z_log;
	zgd->zgd_private = zp;
	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = ENOENT;
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data.  We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		for (;;) {
			uint64_t blkoff;

			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_rl = zfs_range_lock(zp, offset, size,
			    RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			zfs_range_unlock(zgd->zgd_rl);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = ENOENT;
#ifdef DEBUG
		if (zil_fault_io) {
			error = EIO;
			zil_fault_io = 0;
		}
#endif
		if (error == 0)
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= zp->z_blksz);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf.  We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == 0)
				return (0);

			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;
				error = 0;
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}
int
zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	int error;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_access);
/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held inode reference for it.
 *
 *	IN:	dip	- inode of directory to search.
 *		nm	- name of entry to lookup.
 *		flags	- LOOKUP_XATTR set if looking for an attribute.
 *		cr	- credentials of caller.
 *		direntflags - directory lookup flags
 *		realpnp	- returned pathname.
 *
 *	OUT:	ipp	- inode of located entry, NULL if not found.
 *
 *	RETURN:	0 if success
 *		error code if failure
 */
int
zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
    cred_t *cr, int *direntflags, pathname_t *realpnp)
{
	znode_t *zdp = ITOZ(dip);
	zfs_sb_t *zsb = ITOZSB(dip);
	int error = 0;

	/* fast path */
	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {

		if (!S_ISDIR(dip->i_mode)) {
			return (ENOTDIR);
		} else if (zdp->z_sa_hdl == NULL) {
			return (EIO);
		}

		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
			error = zfs_fastaccesschk_execute(zdp, cr);
			if (!error) {
				*ipp = dip;
				igrab(*ipp);
				return (0);
			}
			return (error);
#ifdef HAVE_DNLC
		} else {
			vnode_t *tvp = dnlc_lookup(dvp, nm);

			if (tvp) {
				error = zfs_fastaccesschk_execute(zdp, cr);
				if (error) {
					iput(tvp);
					return (error);
				}
				if (tvp == DNLC_NO_VNODE) {
					iput(tvp);
					return (ENOENT);
				} else {
					*vpp = tvp;
					return (specvp_check(vpp, cr));
				}
			}
#endif /* HAVE_DNLC */
		}
	}
	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zdp);

	*ipp = NULL;

	if (flags & LOOKUP_XATTR) {
		/*
		 * If the xattr property is off, refuse the lookup request.
		 */
		if (!(zsb->z_flags & ZSB_XATTR_USER)) {
			ZFS_EXIT(zsb);
			return (EINVAL);
		}

		/*
		 * We don't allow recursive attributes..
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {
			ZFS_EXIT(zsb);
			return (EINVAL);
		}

		if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
			ZFS_EXIT(zsb);
			return (error);
		}

		/*
		 * Do we have permission to get into attribute directory?
		 */
		if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
		    B_FALSE, cr))) {
			iput(*ipp);
			*ipp = NULL;
		}

		ZFS_EXIT(zsb);
		return (error);
	}

	if (!S_ISDIR(dip->i_mode)) {
		ZFS_EXIT(zsb);
		return (ENOTDIR);
	}

	/*
	 * Check accessibility of directory.
	 */
	if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
		ZFS_EXIT(zsb);
		return (error);
	}

	if (zsb->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zsb);
		return (EILSEQ);
	}

	error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
	if ((error == 0) && (*ipp))
		zfs_inode_update(ITOZ(*ipp));

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_lookup);
/*
 * Attempt to create a new entry in a directory.  If the entry
 * already exists, truncate the file if permissible, else return
 * an error.  Return the ip of the created or trunc'd file.
 *
 *	IN:	dip	- inode of directory to put new file entry in.
 *		name	- name of new file entry.
 *		vap	- attributes of new file.
 *		excl	- flag indicating exclusive or non-exclusive mode.
 *		mode	- mode to open file with.
 *		cr	- credentials of caller.
 *		flag	- large file flag [UNUSED].
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created or trunc'd entry.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime updated if new entry created
 *	 ip - ctime|mtime always, atime if new
 */
int
zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
	znode_t		*zp, *dzp = ITOZ(dip);
	zfs_sb_t	*zsb = ITOZSB(dip);
	zilog_t		*zilog;
	objset_t	*os;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	uid_t		uid;
	gid_t		gid;
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	boolean_t	have_acl = B_FALSE;
	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	gid = crgetgid(cr);
	uid = crgetuid(cr);

	if (zsb->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (EINVAL);

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);
	os = zsb->z_os;
	zilog = zsb->z_log;

	if (zsb->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zsb);
		return (EILSEQ);
	}

#ifdef HAVE_XVATTR
	if (vap->va_mask & AT_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zsb);
			return (error);
		}
	}
#endif /* HAVE_XVATTR */
top:
	*ipp = NULL;
	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */
		igrab(dip);
		zp = dzp;
		dl = NULL;
		error = 0;
	} else {
		/* possible igrab(zp) */
		int zflg = 0;

		if (flag & FIGNORECASE)
			zflg |= ZCILOOK;

		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
		    NULL, NULL);
		if (error) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			if (strcmp(name, "..") == 0)
				error = EISDIR;
			ZFS_EXIT(zsb);
			return (error);
		}
	}
	if (zp == NULL) {
		uint64_t txtype;

		/*
		 * Create a new file object and update the directory
		 * to reference it.
		 */
		if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			goto out;
		}

		/*
		 * We only support the creation of regular files in
		 * extended attribute directories.
		 */
		if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			error = EINVAL;
			goto out;
		}

		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
		    cr, vsecp, &acl_ids)) != 0)
			goto out;
		have_acl = B_TRUE;

		if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
			zfs_acl_ids_free(&acl_ids);
			error = EDQUOT;
			goto out;
		}
= dmu_tx_create(os
);
1260 dmu_tx_hold_sa_create(tx
, acl_ids
.z_aclp
->z_acl_bytes
+
1261 ZFS_SA_BASE_ATTR_SIZE
);
1263 fuid_dirtied
= zsb
->z_fuid_dirty
;
1265 zfs_fuid_txhold(zsb
, tx
);
1266 dmu_tx_hold_zap(tx
, dzp
->z_id
, TRUE
, name
);
1267 dmu_tx_hold_sa(tx
, dzp
->z_sa_hdl
, B_FALSE
);
1268 if (!zsb
->z_use_sa
&&
1269 acl_ids
.z_aclp
->z_acl_bytes
> ZFS_ACE_SPACE
) {
1270 dmu_tx_hold_write(tx
, DMU_NEW_OBJECT
,
1271 0, acl_ids
.z_aclp
->z_acl_bytes
);
1273 error
= dmu_tx_assign(tx
, TXG_NOWAIT
);
1275 zfs_dirent_unlock(dl
);
1276 if (error
== ERESTART
) {
1281 zfs_acl_ids_free(&acl_ids
);
1286 zfs_mknode(dzp
, vap
, tx
, cr
, 0, &zp
, &acl_ids
);
1289 zfs_fuid_sync(zsb
, tx
);
1291 (void) zfs_link_create(dl
, zp
, tx
, ZNEW
);
1292 txtype
= zfs_log_create_txtype(Z_FILE
, vsecp
, vap
);
1293 if (flag
& FIGNORECASE
)
1295 zfs_log_create(zilog
, tx
, txtype
, dzp
, zp
, name
,
1296 vsecp
, acl_ids
.z_fuidp
, vap
);
1297 zfs_acl_ids_free(&acl_ids
);
		int aflags = (flag & FAPPEND) ? V_APPEND : 0;

		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		have_acl = B_FALSE;

		/*
		 * A directory entry already exists for this name.
		 */
		/*
		 * Can't truncate an existing file if in exclusive mode.
		 */
		if (excl) {
			error = EEXIST;
			goto out;
		}
		/*
		 * Can't open a directory for writing.
		 */
		if (S_ISDIR(ZTOI(zp)->i_mode)) {
			error = EISDIR;
			goto out;
		}
		/*
		 * Verify requested access to file.
		 */
		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
			goto out;
		}

		mutex_enter(&dzp->z_lock);
		dzp->z_seq++;
		mutex_exit(&dzp->z_lock);

		/*
		 * Truncate regular files if requested.
		 */
		if (S_ISREG(ZTOI(zp)->i_mode) &&
		    (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
			/* we can't hold any locks when calling zfs_freesp() */
			zfs_dirent_unlock(dl);
			dl = NULL;
			error = zfs_freesp(zp, 0, 0, mode, TRUE);
		}
	}
out:

	if (dl)
		zfs_dirent_unlock(dl);

	if (error) {
		if (zp)
			iput(ZTOI(zp));
	} else {
		zfs_inode_update(dzp);
		zfs_inode_update(zp);
		*ipp = ZTOI(zp);
	}

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_create);
/*
 * Remove an entry from a directory.
 *
 *	IN:	dip	- inode of directory to remove entry from.
 *		name	- name of entry to remove.
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime
 *	 ip - ctime (if nlink > 0)
 */
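/*
 * Zero placeholder: presumably handed to sa_update() to null out a
 * znode's SA_ZPL_XATTR reference when its extended attribute directory
 * is torn down along with it (see the xzp handling in zfs_remove()).
 */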
uint64_t null_xattr = 0;
/*ARGSUSED*/
int
zfs_remove(struct inode *dip, char *name, cred_t *cr)
{
	znode_t		*zp, *dzp = ITOZ(dip);
	znode_t		*xzp = NULL;
	struct inode	*ip;
	zfs_sb_t	*zsb = ITOZSB(dip);
	zilog_t		*zilog;
	uint64_t	xattr_obj;
	uint64_t	xattr_obj_unlinked = 0;
	uint64_t	obj = 0;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	boolean_t	unlinked;
	uint64_t	txtype;
	pathname_t	*realnmp = NULL;
#ifdef HAVE_PN_UTILS
	pathname_t	realnm;
#endif /* HAVE_PN_UTILS */
	int		error;
	int		zflg = ZEXISTS;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);
	zilog = zsb->z_log;

#ifdef HAVE_PN_UTILS
	if (flags & FIGNORECASE) {
		zflg |= ZCILOOK;
		(void) pn_alloc(&realnm);
		realnmp = &realnm;
	}
#endif /* HAVE_PN_UTILS */
top:
	xattr_obj = 0;
	xzp = NULL;
	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, realnmp))) {
#ifdef HAVE_PN_UTILS
		if (realnmp)
			pn_free(realnmp);
#endif /* HAVE_PN_UTILS */
		ZFS_EXIT(zsb);
		return (error);
	}

	ip = ZTOI(zp);

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
		goto out;
	}

	/*
	 * Need to use rmdir for removing directories.
	 */
	if (S_ISDIR(ip->i_mode)) {
		error = EPERM;
		goto out;
	}

#ifdef HAVE_DNLC
	if (realnmp)
		dnlc_remove(dvp, realnmp->pn_buf);
	else
		dnlc_remove(dvp, name);
#endif /* HAVE_DNLC */
	/*
	 * We never delete the znode and always place it in the unlinked
	 * set.  The dentry cache will always hold the last reference and
	 * is responsible for safely freeing the znode.
	 */
	obj = zp->z_id;
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	/* are there any extended attributes? */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zsb, xattr_obj, &xzp);
		ASSERT3U(error, ==, 0);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}

	/* charge as an update -- would be nice not to charge at all */
	dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		iput(ip);
		if (xzp)
			iput(ZTOI(xzp));
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
#ifdef HAVE_PN_UTILS
		if (realnmp)
			pn_free(realnmp);
#endif /* HAVE_PN_UTILS */
		dmu_tx_abort(tx);
		ZFS_EXIT(zsb);
		return (error);
	}
	/*
	 * Remove the directory entry.
	 */
	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);

	if (error) {
		dmu_tx_commit(tx);
		goto out;
	}

	if (unlinked) {
		/*
		 * Hold z_lock so that we can make sure that the ACL obj
		 * hasn't changed.  Could have been deleted due to
		 * zfs_sa_upgrade().
		 */
		mutex_enter(&zp->z_lock);
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
		mutex_exit(&zp->z_lock);
		zfs_unlinked_add(zp, tx);
	}
	txtype = TX_REMOVE;
#ifdef HAVE_PN_UTILS
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
#endif /* HAVE_PN_UTILS */
	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);

	dmu_tx_commit(tx);
out:
#ifdef HAVE_PN_UTILS
	if (realnmp)
		pn_free(realnmp);
#endif /* HAVE_PN_UTILS */

	zfs_dirent_unlock(dl);
	zfs_inode_update(dzp);
	zfs_inode_update(zp);
	if (xzp)
		zfs_inode_update(xzp);

	iput(ip);
	if (xzp)
		iput(ZTOI(xzp));

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_remove);
/*
 * Create a new directory and insert it into dip using the name
 * provided.  Return a pointer to the inserted directory.
 *
 *	IN:	dip	- inode of directory to add subdir to.
 *		dirname	- name of new directory.
 *		vap	- attributes of new directory.
 *		cr	- credentials of caller.
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created directory.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime updated
 *	ipp - ctime|mtime|atime updated
 */
/*ARGSUSED*/
int
zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
    cred_t *cr, int flags, vsecattr_t *vsecp)
{
	znode_t		*zp, *dzp = ITOZ(dip);
	zfs_sb_t	*zsb = ITOZSB(dip);
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	uint64_t	txtype;
	dmu_tx_t	*tx;
	int		error;
	int		zf = ZNEW;
	uid_t		uid;
	gid_t		gid = crgetgid(cr);
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;

	ASSERT(S_ISDIR(vap->va_mode));
	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	uid = crgetuid(cr);
	if (zsb->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (EINVAL);

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);
	zilog = zsb->z_log;

	if (dzp->z_pflags & ZFS_XATTR) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	if (zsb->z_utf8 && u8_validate(dirname,
	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zsb);
		return (EILSEQ);
	}
	if (flags & FIGNORECASE)
		zf |= ZCILOOK;

#ifdef HAVE_XVATTR
	if (vap->va_mask & AT_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zsb);
			return (error);
		}
	}
#endif /* HAVE_XVATTR */
	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
	    vsecp, &acl_ids)) != 0) {
		ZFS_EXIT(zsb);
		return (error);
	}
	/*
	 * First make sure the new directory doesn't exist.
	 *
	 * Existence is checked first to make sure we don't return
	 * EACCES instead of EEXIST which can cause some applications
	 * to fail.
	 */
top:
	*ipp = NULL;

	if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
	    NULL, NULL))) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zsb);
		return (error);
	}

	if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zsb);
		return (error);
	}

	if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zsb);
		return (EDQUOT);
	}
	/*
	 * Add a new entry to the directory.
	 */
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zsb->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);
	if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zsb);
		return (error);
	}

	/*
	 * Create new node.
	 */
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);

	/*
	 * Now put new name in parent dir.
	 */
	(void) zfs_link_create(dl, zp, tx, ZNEW);

	*ipp = ZTOI(zp);

	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
	    acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);
	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_mkdir);
/*
 * Remove a directory subdir entry.  If the current working
 * directory is the same as the subdir to be removed, the
 * remove will fail.
 *
 *	IN:	dip	- inode of directory to remove from.
 *		name	- name of directory to be removed.
 *		cwd	- inode of current working directory.
 *		cr	- credentials of caller.
 *		flags	- case flags
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime updated
 */
/*ARGSUSED*/
int
zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
    int flags)
{
	znode_t		*dzp = ITOZ(dip);
	znode_t		*zp;
	struct inode	*ip;
	zfs_sb_t	*zsb = ITOZSB(dip);
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	int		zflg = ZEXISTS;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);
	zilog = zsb->z_log;

	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;
top:
	zp = NULL;

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, NULL))) {
		ZFS_EXIT(zsb);
		return (error);
	}

	ip = ZTOI(zp);

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
		goto out;
	}

	if (!S_ISDIR(ip->i_mode)) {
		error = ENOTDIR;
		goto out;
	}

	if (ip == cwd) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Grab a lock on the directory to make sure that no one is
	 * trying to add (or lookup) entries while we are removing it.
	 */
	rw_enter(&zp->z_name_lock, RW_WRITER);

	/*
	 * Grab a lock on the parent pointer to make sure we play well
	 * with the treewalk and directory rename code.
	 */
	rw_enter(&zp->z_parent_lock, RW_WRITER);

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		rw_exit(&zp->z_parent_lock);
		rw_exit(&zp->z_name_lock);
		zfs_dirent_unlock(dl);
		iput(ip);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		ZFS_EXIT(zsb);
		return (error);
	}

	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);

	if (error == 0) {
		uint64_t txtype = TX_RMDIR;
		if (flags & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
	}

	dmu_tx_commit(tx);

	rw_exit(&zp->z_parent_lock);
	rw_exit(&zp->z_name_lock);
out:
	zfs_dirent_unlock(dl);

	iput(ip);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);
	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_rmdir);
/*
 * Read as many directory entries as will fit into the provided
 * dirent buffer from the given directory cursor position.
 *
 *	IN:	ip	- inode of directory to read.
 *		dirent	- buffer for directory entries.
 *
 *	OUT:	dirent	- filler buffer of directory entries.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - atime updated
 *
 * Note that the low 4 bits of the cookie returned by zap is always zero.
 * This allows us to use the low range for "special" directory entries:
 * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
 * we use the offset 2 for the '.zfs' directory.
 */
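/*
 * Note: filldir is the Linux directory-entry callback; a non-zero return
 * means the caller's buffer is full, which terminates the iteration loop
 * below (see the 'done' check).
 */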
int
zfs_readdir(struct inode *ip, void *dirent, filldir_t filldir,
    loff_t *pos, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);
	objset_t	*os;
	zap_cursor_t	zc;
	zap_attribute_t	zap;
	int		error;
	uint8_t		prefetch;
	int		done = 0;
	uint64_t	parent;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zsb),
	    &parent, sizeof (parent))) != 0)
		goto out;

	/*
	 * Quit if directory has been removed (posix)
	 */
	error = 0;
	if (zp->z_unlinked)
		goto out;

	os = zsb->z_os;
	prefetch = zp->z_zn_prefetch;

	/*
	 * Initialize the iterator cursor.
	 */
	if (*pos <= 3) {
		/*
		 * Start iteration from the beginning of the directory.
		 */
		zap_cursor_init(&zc, os, zp->z_id);
	} else {
		/*
		 * The offset is a serialized cursor.
		 */
		zap_cursor_init_serialized(&zc, os, zp->z_id, *pos);
	}

	/*
	 * Transform to file-system independent format
	 */
	while (!done) {
		uint64_t objnum;
		/*
		 * Special case `.', `..', and `.zfs'.
		 */
		if (*pos == 0) {
			(void) strcpy(zap.za_name, ".");
			zap.za_normalization_conflict = 0;
			objnum = zp->z_id;
		} else if (*pos == 1) {
			(void) strcpy(zap.za_name, "..");
			zap.za_normalization_conflict = 0;
			objnum = parent;
		} else if (*pos == 2 && zfs_show_ctldir(zp)) {
			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
			zap.za_normalization_conflict = 0;
			objnum = ZFSCTL_INO_ROOT;
		} else {
			/*
			 * Grab next entry.
			 */
			if ((error = zap_cursor_retrieve(&zc, &zap))) {
				if (error == ENOENT)
					break;
				else
					goto update;
			}

			if (zap.za_integer_length != 8 ||
			    zap.za_num_integers != 1) {
				cmn_err(CE_WARN, "zap_readdir: bad directory "
				    "entry, obj = %lld, offset = %lld\n",
				    (u_longlong_t)zp->z_id,
				    (u_longlong_t)*pos);
				error = ENXIO;
				goto update;
			}

			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
		}
		done = filldir(dirent, zap.za_name, strlen(zap.za_name),
		    zap_cursor_serialize(&zc), objnum, 0);
		if (done)
			break;

		/* Prefetch znode */
		if (prefetch)
			dmu_prefetch(os, objnum, 0, 0);

		if (*pos > 2 || (*pos == 2 && !zfs_show_ctldir(zp))) {
			zap_cursor_advance(&zc);
			*pos = zap_cursor_serialize(&zc);
		} else {
			(*pos)++;
		}
	}
	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */

update:
	zap_cursor_fini(&zc);
	if (error == ENOENT)
		error = 0;

	ZFS_ACCESSTIME_STAMP(zsb, zp);
	zfs_inode_update(zp);

out:
	ZFS_EXIT(zsb);

	return (error);
}
EXPORT_SYMBOL(zfs_readdir);
ulong_t zfs_fsync_sync_cnt = 4;
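/*
 * Flush dirty log records for this file from the in-memory ZIL to stable
 * storage.  (A sketch of the contract: syncflag and cr are accepted for
 * interface compatibility and are not otherwise consulted below.)
 */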
int
zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zsb->z_os->os_sync != ZFS_SYNC_DISABLED) {
		ZFS_ENTER(zsb);
		ZFS_VERIFY_ZP(zp);
		zil_commit(zsb->z_log, zp->z_id);
		ZFS_EXIT(zsb);
	}
	return (0);
}
EXPORT_SYMBOL(zfs_fsync);
/*
 * Get the requested file attributes and place them in the provided
 * kstat structure.
 *
 *	IN:	ip	- inode of file.
 *		stat	- kstat structure to fill in.
 *		flags	- ATTR_NOACLCHECK (CIFS server context)
 *		cr	- credentials of caller.
 *
 *	OUT:	stat	- filled in kstat values.
 */
int
zfs_getattr(struct inode *ip, struct kstat *stat, int flags, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	int	error = 0;
	uint64_t links;
	uint64_t mtime[2], ctime[2];
	uint32_t blksz;
	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	zfs_fuid_map_ids(zp, cr, &stat->uid, &stat->gid);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);

	if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
		ZFS_EXIT(zsb);
		return (error);
	}

	/*
	 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
	 * Also, if we are the owner don't bother, since owner should
	 * always be allowed to read basic attributes of file.
	 */
	if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
	    (stat->uid != crgetuid(cr))) {
		if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
		    skipaclchk, cr))) {
			ZFS_EXIT(zsb);
			return (error);
		}
	}

	/*
	 * Return all attributes.  It's cheaper to provide the answer
	 * than to determine whether we were asked the question.
	 */

	mutex_enter(&zp->z_lock);
	stat->ino = ip->i_ino;
	stat->mode = zp->z_mode;
	stat->uid = zp->z_uid;
	stat->gid = zp->z_gid;
	if ((zp->z_id == zsb->z_root) && zfs_show_ctldir(zp))
		links = zp->z_links + 1;
	else
		links = zp->z_links;
	stat->nlink = MIN(links, ZFS_LINK_MAX);
	stat->size = i_size_read(ip);
	stat->rdev = ip->i_rdev;
	stat->dev = ip->i_rdev;

	ZFS_TIME_DECODE(&stat->atime, zp->z_atime);
	ZFS_TIME_DECODE(&stat->mtime, mtime);
	ZFS_TIME_DECODE(&stat->ctime, ctime);

	mutex_exit(&zp->z_lock);

	sa_object_size(zp->z_sa_hdl, &blksz, &stat->blocks);
	stat->blksize = (1 << ip->i_blkbits);

	if (zp->z_blksz == 0) {
		/*
		 * Block size hasn't been set; suggest maximal I/O transfers.
		 */
		stat->blksize = zsb->z_max_blksz;
	}

	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_getattr);
/*
 * Set the file attributes to the values contained in the
 * iattr structure.
 *
 *	IN:	ip	- inode of file to be modified.
 *		attr	- new attribute values.
 *			  If AT_XVATTR set, then optional attrs are being set
 *		flags	- ATTR_UTIME set if non-default time values provided.
 *			- ATTR_NOACLCHECK (CIFS context only).
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime updated, mtime updated if size changed.
 */
int
zfs_setattr(struct inode *ip, struct iattr *attr, int flags, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);
	zilog_t		*zilog;
	dmu_tx_t	*tx;
	vattr_t		oldva;
	uint_t		mask = attr->ia_valid;
	uint_t		saved_mask;
	int		trim_mask = 0;
	uint64_t	new_mode;
	uint64_t	new_uid, new_gid;
	uint64_t	xattr_obj;
	uint64_t	mtime[2], ctime[2];
	znode_t		*attrzp;
	int		need_policy = FALSE;
	int		err, err2;
	zfs_fuid_info_t *fuidp = NULL;
	boolean_t	skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	zfs_acl_t	*aclp = NULL;
	boolean_t	fuid_dirtied = B_FALSE;
	sa_bulk_attr_t	bulk[7], xattr_bulk[7];
	int		count = 0, xattr_count = 0;
	if (mask == 0)
		return (0);

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	zilog = zsb->z_log;

	/*
	 * Make sure that if we have ephemeral uid/gid or xvattr specified
	 * that file system is at proper version level
	 */
	if (zsb->z_use_fuids == B_FALSE &&
	    (((mask & ATTR_UID) && IS_EPHEMERAL(attr->ia_uid)) ||
	    ((mask & ATTR_GID) && IS_EPHEMERAL(attr->ia_gid)))) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
		ZFS_EXIT(zsb);
		return (EISDIR);
	}

	if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
		ZFS_EXIT(zsb);
		return (EPERM);
	}

top:
	attrzp = NULL;
	aclp = NULL;

	/* Can this be moved to before the top label? */
	if (zsb->z_vfs->mnt_flags & MNT_READONLY) {
		ZFS_EXIT(zsb);
		return (EROFS);
	}
	/*
	 * First validate permissions
	 */

	if (mask & ATTR_SIZE) {
		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
		if (err) {
			ZFS_EXIT(zsb);
			return (err);
		}
		/*
		 * XXX - Note, we are not providing any open
		 * mode flags here (like FNDELAY), so we may
		 * block if there are locks present... this
		 * should be addressed in openat().
		 */
		/* XXX - would it be OK to generate a log record here? */
		err = zfs_freesp(zp, attr->ia_size, 0, 0, FALSE);
		if (err) {
			ZFS_EXIT(zsb);
			return (err);
		}

		/* Careful negative Linux return code here */
		err = -vmtruncate(ip, attr->ia_size);
		if (err) {
			ZFS_EXIT(zsb);
			return (err);
		}
	}
& (ATTR_UID
|ATTR_GID
)) {
2244 int idmask
= (mask
& (ATTR_UID
|ATTR_GID
));
2249 * NOTE: even if a new mode is being set,
2250 * we may clear S_ISUID/S_ISGID bits.
2253 if (!(mask
& ATTR_MODE
))
2254 attr
->ia_mode
= zp
->z_mode
;
2257 * Take ownership or chgrp to group we are a member of
2260 take_owner
= (mask
& ATTR_UID
) &&
2261 (attr
->ia_uid
== crgetuid(cr
));
2262 take_group
= (mask
& ATTR_GID
) &&
2263 zfs_groupmember(zsb
, attr
->ia_gid
, cr
);
2266 * If both AT_UID and AT_GID are set then take_owner and
2267 * take_group must both be set in order to allow taking
2270 * Otherwise, send the check through secpolicy_vnode_setattr()
2274 if (((idmask
== (ATTR_UID
|ATTR_GID
)) &&
2275 take_owner
&& take_group
) ||
2276 ((idmask
== ATTR_UID
) && take_owner
) ||
2277 ((idmask
== ATTR_GID
) && take_group
)) {
2278 if (zfs_zaccess(zp
, ACE_WRITE_OWNER
, 0,
2279 skipaclchk
, cr
) == 0) {
2281 * Remove setuid/setgid for non-privileged users
2283 secpolicy_setid_clear(attr
, cr
);
2284 trim_mask
= (mask
& (ATTR_UID
|ATTR_GID
));
2293 mutex_enter(&zp
->z_lock
);
2294 oldva
.va_mode
= zp
->z_mode
;
2295 zfs_fuid_map_ids(zp
, cr
, &oldva
.va_uid
, &oldva
.va_gid
);
2297 mutex_exit(&zp
->z_lock
);
2299 if (mask
& ATTR_MODE
) {
2300 if (zfs_zaccess(zp
, ACE_WRITE_ACL
, 0, skipaclchk
, cr
) == 0) {
2301 err
= secpolicy_setid_setsticky_clear(ip
, attr
,
2307 trim_mask
|= ATTR_MODE
;
2315 * If trim_mask is set then take ownership
2316 * has been granted or write_acl is present and user
2317 * has the ability to modify mode. In that case remove
2318 * UID|GID and or MODE from mask so that
2319 * secpolicy_vnode_setattr() doesn't revoke it.
2323 saved_mask
= attr
->ia_valid
;
2324 attr
->ia_valid
&= ~trim_mask
;
2326 err
= secpolicy_vnode_setattr(cr
, ip
, attr
, &oldva
, flags
,
2327 (int (*)(void *, int, cred_t
*))zfs_zaccess_unix
, zp
);
2334 attr
->ia_valid
|= saved_mask
;
	/*
	 * secpolicy_vnode_setattr, or take ownership may have
	 * changed va_mask
	 */
	mask = attr->ia_valid;

	if ((mask & (ATTR_UID | ATTR_GID))) {
		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
		    &xattr_obj, sizeof (xattr_obj));

		if (err == 0 && xattr_obj) {
			err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
			if (err)
				goto out2;
		}
		if (mask & ATTR_UID) {
			new_uid = zfs_fuid_create(zsb,
			    (uint64_t)attr->ia_uid, cr, ZFS_OWNER, &fuidp);
			if (new_uid != zp->z_uid &&
			    zfs_fuid_overquota(zsb, B_FALSE, new_uid)) {
				err = EDQUOT;
				goto out2;
			}
		}

		if (mask & ATTR_GID) {
			new_gid = zfs_fuid_create(zsb, (uint64_t)attr->ia_gid,
			    cr, ZFS_GROUP, &fuidp);
			if (new_gid != zp->z_gid &&
			    zfs_fuid_overquota(zsb, B_TRUE, new_gid)) {
				err = EDQUOT;
				goto out2;
			}
		}
	}
	tx = dmu_tx_create(zsb->z_os);

	if (mask & ATTR_MODE) {
		uint64_t pmode = zp->z_mode;
		uint64_t acl_obj;

		new_mode = (pmode & S_IFMT) | (attr->ia_mode & ~S_IFMT);

		zfs_acl_chmod_setattr(zp, &aclp, new_mode);

		mutex_enter(&zp->z_lock);
		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
			/*
			 * Are we upgrading ACL from old V0 format
			 * to V1 format?
			 */
			if (zsb->z_version >= ZPL_VERSION_FUID &&
			    zfs_znode_acl_version(zp) ==
			    ZFS_ACL_VERSION_INITIAL) {
				dmu_tx_hold_free(tx, acl_obj, 0,
				    DMU_OBJECT_END);
				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
				    0, aclp->z_acl_bytes);
			} else {
				dmu_tx_hold_write(tx, acl_obj, 0,
				    aclp->z_acl_bytes);
			}
		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, aclp->z_acl_bytes);
		}
		mutex_exit(&zp->z_lock);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	} else {
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	}

	if (attrzp) {
		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
	}

	fuid_dirtied = zsb->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);

	zfs_sa_upgrade_txholds(tx, zp);

	err = dmu_tx_assign(tx, TXG_NOWAIT);
	if (err) {
		if (err == ERESTART)
			dmu_tx_wait(tx);
		goto out;
	}
	/*
	 * Set each attribute requested.
	 * We group settings according to the locks they need to acquire.
	 *
	 * Note: you cannot set ctime directly, although it will be
	 * updated as a side-effect of calling this function.
	 */

	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
		mutex_enter(&zp->z_acl_lock);
	mutex_enter(&zp->z_lock);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));

	if (attrzp) {
		if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
			mutex_enter(&attrzp->z_acl_lock);
		mutex_enter(&attrzp->z_lock);
		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
		    SA_ZPL_FLAGS(zsb), NULL, &attrzp->z_pflags,
		    sizeof (attrzp->z_pflags));
	}
	if (mask & (ATTR_UID|ATTR_GID)) {

		if (mask & ATTR_UID) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
			    &new_uid, sizeof (new_uid));
			zp->z_uid = new_uid;
			if (attrzp) {
				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_UID(zsb), NULL, &new_uid,
				    sizeof (new_uid));
				attrzp->z_uid = new_uid;
			}
		}

		if (mask & ATTR_GID) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb),
			    NULL, &new_gid, sizeof (new_gid));
			zp->z_gid = new_gid;
			if (attrzp) {
				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_GID(zsb), NULL, &new_gid,
				    sizeof (new_gid));
				attrzp->z_gid = new_gid;
			}
		}
		if (!(mask & ATTR_MODE)) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb),
			    NULL, &new_mode, sizeof (new_mode));
			new_mode = zp->z_mode;
		}
		err = zfs_acl_chown_setattr(zp);
		ASSERT(err == 0);
		if (attrzp) {
			err = zfs_acl_chown_setattr(attrzp);
			ASSERT(err == 0);
		}
	}

	if (mask & ATTR_MODE) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
		    &new_mode, sizeof (new_mode));
		zp->z_mode = new_mode;
		ASSERT3U((uintptr_t)aclp, !=, NULL);
		err = zfs_aclset_common(zp, aclp, cr, tx);
		ASSERT3U(err, ==, 0);
		if (zp->z_acl_cached)
			zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = aclp;
		aclp = NULL;
	}

	if (mask & ATTR_ATIME) {
		ZFS_TIME_ENCODE(&attr->ia_atime, zp->z_atime);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
		    &zp->z_atime, sizeof (zp->z_atime));
	}

	if (mask & ATTR_MTIME) {
		ZFS_TIME_ENCODE(&attr->ia_mtime, mtime);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL,
		    mtime, sizeof (mtime));
	}

	/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
	if (mask & ATTR_SIZE && !(mask & ATTR_MTIME)) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb),
		    NULL, mtime, sizeof (mtime));
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
		    &ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
		    B_TRUE);
	} else if (mask != 0) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
		    &ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
		    B_TRUE);
		if (attrzp) {
			SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
			    SA_ZPL_CTIME(zsb), NULL,
			    &ctime, sizeof (ctime));
			zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
			    mtime, ctime, B_TRUE);
		}
	}
	/*
	 * Do this after setting timestamps to prevent timestamp
	 * update from toggling bit
	 */

	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);

	if (mask != 0)
		zfs_log_setattr(zilog, tx, TX_SETATTR, zp, attr, mask, fuidp);

	mutex_exit(&zp->z_lock);
	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
		mutex_exit(&zp->z_acl_lock);

	if (attrzp) {
		if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
			mutex_exit(&attrzp->z_acl_lock);
		mutex_exit(&attrzp->z_lock);
	}
out:
	if (err == 0 && attrzp) {
		err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
		    xattr_count, tx);
		ASSERT(err2 == 0);
	}

	if (attrzp)
		iput(ZTOI(attrzp));
	if (aclp)
		zfs_acl_free(aclp);

	if (fuidp) {
		zfs_fuid_info_free(fuidp);
		fuidp = NULL;
	}

	if (err) {
		dmu_tx_abort(tx);
		if (err == ERESTART)
			goto top;
	} else {
		err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
		dmu_tx_commit(tx);
		zfs_inode_update(zp);
	}

out2:
	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zsb);
	return (err);
}
EXPORT_SYMBOL(zfs_setattr);
typedef struct zfs_zlock {
	krwlock_t	*zl_rwlock;	/* lock we acquired */
	znode_t		*zl_znode;	/* znode we held */
	struct zfs_zlock *zl_next;	/* next in list */
} zfs_zlock_t;
2606 zfs_rename_unlock(zfs_zlock_t
**zlpp
)
2610 while ((zl
= *zlpp
) != NULL
) {
2611 if (zl
->zl_znode
!= NULL
)
2612 iput(ZTOI(zl
->zl_znode
));
2613 rw_exit(zl
->zl_rwlock
);
2614 *zlpp
= zl
->zl_next
;
2615 kmem_free(zl
, sizeof (*zl
));
/*
 * Search back through the directory tree, using the ".." entries.
 * Lock each directory in the chain to prevent concurrent renames.
 * Fail any attempt to move a directory into one of its own descendants.
 * XXX - z_parent_lock can overlap with map or grow locks
 */
static int
zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
{
	zfs_zlock_t	*zl;
	znode_t		*zp = tdzp;
	uint64_t	rootid = ZTOZSB(zp)->z_root;
	uint64_t	oidp = zp->z_id;
	krwlock_t	*rwlp = &szp->z_parent_lock;
	krw_t		rw = RW_WRITER;

	/*
	 * First pass write-locks szp and compares to zp->z_id.
	 * Later passes read-lock zp and compare to zp->z_parent.
	 */
	do {
		if (!rw_tryenter(rwlp, rw)) {
			/*
			 * Another thread is renaming in this path.
			 * Note that if we are a WRITER, we don't have any
			 * parent_locks held yet.
			 */
			if (rw == RW_READER && zp->z_id > szp->z_id) {
				/*
				 * Drop our locks and restart
				 */
				zfs_rename_unlock(&zl);
				*zlpp = NULL;
				zp = tdzp;
				oidp = zp->z_id;
				rwlp = &szp->z_parent_lock;
				rw = RW_WRITER;
				continue;
			} else {
				/*
				 * Wait for other thread to drop its locks
				 */
				rw_enter(rwlp, rw);
			}
		}

		zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
		zl->zl_rwlock = rwlp;
		zl->zl_znode = NULL;
		zl->zl_next = *zlpp;
		*zlpp = zl;

		if (oidp == szp->z_id)	/* We're a descendant of szp */
			return (EINVAL);

		if (oidp == rootid)	/* We've hit the top */
			return (0);

		if (rw == RW_READER) {	/* i.e. not the first pass */
			int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
			if (error)
				return (error);
			zl->zl_znode = zp;
		}
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
		    &oidp, sizeof (oidp));
		rwlp = &zp->z_parent_lock;
		rw = RW_READER;

	} while (zp->z_id != sdzp->z_id);

	return (0);
}
/*
 * Move an entry from the provided source directory to the target
 * directory.  Change the entry name as indicated.
 *
 *	IN:	sdip	- Source directory containing the "old entry".
 *		snm	- Old entry name.
 *		tdip	- Target directory to contain the "new entry".
 *		tnm	- New entry name.
 *		cr	- credentials of caller.
 *		flags	- case flags
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	sdip,tdip - ctime|mtime updated
 */
/*ARGSUSED*/
int
zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
    cred_t *cr, int flags)
{
	znode_t		*tdzp, *szp, *tzp;
	znode_t		*sdzp = ITOZ(sdip);
	zfs_sb_t	*zsb = ITOZSB(sdip);
	zilog_t		*zilog;
	zfs_dirlock_t	*sdl, *tdl;
	dmu_tx_t	*tx;
	zfs_zlock_t	*zl;
	int		cmp, serr, terr;
	int		error = 0;
	int		zflg = 0;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(sdzp);
	zilog = zsb->z_log;

	if (tdip->i_sb != sdip->i_sb) {
		ZFS_EXIT(zsb);
		return (EXDEV);
	}

	tdzp = ITOZ(tdip);
	ZFS_VERIFY_ZP(tdzp);
	if (zsb->z_utf8 && u8_validate(tnm,
	    strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zsb);
		return (EILSEQ);
	}

	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;
top:
	szp = NULL;
	tzp = NULL;
	zl = NULL;

	/*
	 * This is to prevent the creation of links into attribute space
	 * by renaming a linked file into/out of an attribute directory.
	 * See the comment in zfs_link() for why this is considered bad.
	 */
	if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	/*
	 * Lock source and target directory entries.  To prevent deadlock,
	 * a lock ordering must be defined.  We lock the directory with
	 * the smallest object id first, or if it's a tie, the one with
	 * the lexically first name.
	 */
	if (sdzp->z_id < tdzp->z_id) {
		cmp = -1;
	} else if (sdzp->z_id > tdzp->z_id) {
		cmp = 1;
	} else {
		/*
		 * First compare the two name arguments without
		 * considering any case folding.
		 */
		int nofold = (zsb->z_norm & ~U8_TEXTPREP_TOUPPER);

		cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
		ASSERT(error == 0 || !zsb->z_utf8);
		if (cmp == 0) {
			/*
			 * POSIX: "If the old argument and the new argument
			 * both refer to links to the same existing file,
			 * the rename() function shall return successfully
			 * and perform no other action."
			 */
			ZFS_EXIT(zsb);
			return (0);
		}
		/*
		 * If the file system is case-folding, then we may
		 * have some more checking to do.  A case-folding file
		 * system is either supporting mixed case sensitivity
		 * access or is completely case-insensitive.  Note
		 * that the file system is always case preserving.
		 *
		 * In mixed sensitivity mode case sensitive behavior
		 * is the default.  FIGNORECASE must be used to
		 * explicitly request case insensitive behavior.
		 *
		 * If the source and target names provided differ only
		 * by case (e.g., a request to rename 'tim' to 'Tim'),
		 * we will treat this as a special case in the
		 * case-insensitive mode: as long as the source name
		 * is an exact match, we will allow this to proceed as
		 * a name-change request.
		 */
		if ((zsb->z_case == ZFS_CASE_INSENSITIVE ||
		    (zsb->z_case == ZFS_CASE_MIXED &&
		    flags & FIGNORECASE)) &&
		    u8_strcmp(snm, tnm, 0, zsb->z_norm, U8_UNICODE_LATEST,
		    &error) == 0) {
			/*
			 * case preserving rename request, require exact
			 * name matches
			 */
			zflg |= ZCIEXACT;
			zflg &= ~ZCILOOK;
		}
	}
2823 * If the source and destination directories are the same, we should
2824 * grab the z_name_lock of that directory only once.
2828 rw_enter(&sdzp
->z_name_lock
, RW_READER
);
2832 serr
= zfs_dirent_lock(&sdl
, sdzp
, snm
, &szp
,
2833 ZEXISTS
| zflg
, NULL
, NULL
);
2834 terr
= zfs_dirent_lock(&tdl
,
2835 tdzp
, tnm
, &tzp
, ZRENAMING
| zflg
, NULL
, NULL
);
2837 terr
= zfs_dirent_lock(&tdl
,
2838 tdzp
, tnm
, &tzp
, zflg
, NULL
, NULL
);
2839 serr
= zfs_dirent_lock(&sdl
,
2840 sdzp
, snm
, &szp
, ZEXISTS
| ZRENAMING
| zflg
,
2846 * Source entry invalid or not there.
2849 zfs_dirent_unlock(tdl
);
2855 rw_exit(&sdzp
->z_name_lock
);
2857 if (strcmp(snm
, "..") == 0)
2863 zfs_dirent_unlock(sdl
);
2867 rw_exit(&sdzp
->z_name_lock
);
2869 if (strcmp(tnm
, "..") == 0)
2876 * Must have write access at the source to remove the old entry
2877 * and write access at the target to create the new entry.
2878 * Note that if target and source are the same, this can be
2879 * done in a single check.
2882 if ((error
= zfs_zaccess_rename(sdzp
, szp
, tdzp
, tzp
, cr
)))
2885 if (S_ISDIR(ZTOI(szp
)->i_mode
)) {
2887 * Check to make sure rename is valid.
2888 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
2890 if ((error
= zfs_rename_lock(szp
, tdzp
, sdzp
, &zl
)))
2895 * Does target exist?
2899 * Source and target must be the same type.
2901 if (S_ISDIR(ZTOI(szp
)->i_mode
)) {
2902 if (!S_ISDIR(ZTOI(tzp
)->i_mode
)) {
2907 if (S_ISDIR(ZTOI(tzp
)->i_mode
)) {
2913 * POSIX dictates that when the source and target
2914 * entries refer to the same file object, rename
2915 * must do nothing and exit without error.
2917 if (szp
->z_id
== tzp
->z_id
) {
2923 tx
= dmu_tx_create(zsb
->z_os
);
2924 dmu_tx_hold_sa(tx
, szp
->z_sa_hdl
, B_FALSE
);
2925 dmu_tx_hold_sa(tx
, sdzp
->z_sa_hdl
, B_FALSE
);
2926 dmu_tx_hold_zap(tx
, sdzp
->z_id
, FALSE
, snm
);
2927 dmu_tx_hold_zap(tx
, tdzp
->z_id
, TRUE
, tnm
);
2929 dmu_tx_hold_sa(tx
, tdzp
->z_sa_hdl
, B_FALSE
);
2930 zfs_sa_upgrade_txholds(tx
, tdzp
);
2933 dmu_tx_hold_sa(tx
, tzp
->z_sa_hdl
, B_FALSE
);
2934 zfs_sa_upgrade_txholds(tx
, tzp
);
2937 zfs_sa_upgrade_txholds(tx
, szp
);
2938 dmu_tx_hold_zap(tx
, zsb
->z_unlinkedobj
, FALSE
, NULL
);
2939 error
= dmu_tx_assign(tx
, TXG_NOWAIT
);
2942 zfs_rename_unlock(&zl
);
2943 zfs_dirent_unlock(sdl
);
2944 zfs_dirent_unlock(tdl
);
2947 rw_exit(&sdzp
->z_name_lock
);
2952 if (error
== ERESTART
) {
2962 if (tzp
) /* Attempt to remove the existing target */
2963 error
= zfs_link_destroy(tdl
, tzp
, tx
, zflg
, NULL
);
2966 error
= zfs_link_create(tdl
, szp
, tx
, ZRENAMING
);
2968 szp
->z_pflags
|= ZFS_AV_MODIFIED
;
2970 error
= sa_update(szp
->z_sa_hdl
, SA_ZPL_FLAGS(zsb
),
2971 (void *)&szp
->z_pflags
, sizeof (uint64_t), tx
);
2972 ASSERT3U(error
, ==, 0);
2974 error
= zfs_link_destroy(sdl
, szp
, tx
, ZRENAMING
, NULL
);
2976 zfs_log_rename(zilog
, tx
, TX_RENAME
|
2977 (flags
& FIGNORECASE
? TX_CI
: 0), sdzp
,
2978 sdl
->dl_name
, tdzp
, tdl
->dl_name
, szp
);
2981 * At this point, we have successfully created
2982 * the target name, but have failed to remove
2983 * the source name. Since the create was done
2984 * with the ZRENAMING flag, there are
2985 * complications; for one, the link count is
2986 * wrong. The easiest way to deal with this
2987 * is to remove the newly created target, and
2988 * return the original error. This must
2989 * succeed; fortunately, it is very unlikely to
2990 * fail, since we just created it.
2992 VERIFY3U(zfs_link_destroy(tdl
, szp
, tx
,
2993 ZRENAMING
, NULL
), ==, 0);
3001 zfs_rename_unlock(&zl
);
3003 zfs_dirent_unlock(sdl
);
3004 zfs_dirent_unlock(tdl
);
3006 zfs_inode_update(sdzp
);
3008 rw_exit(&sdzp
->z_name_lock
);
3011 zfs_inode_update(tdzp
);
3013 zfs_inode_update(szp
);
3016 zfs_inode_update(tzp
);
3020 if (zsb
->z_os
->os_sync
== ZFS_SYNC_ALWAYS
)
3021 zil_commit(zilog
, 0);
3026 EXPORT_SYMBOL(zfs_rename
);
/*
 * Insert the indicated symbolic reference entry into the directory.
 *
 *	IN:	dip	- Directory to contain new symbolic link.
 *		link	- Name for new symlink entry.
 *		vap	- Attributes of new entry.
 *		target	- Target path of new symlink.
 *		cr	- credentials of caller.
 *		flags	- case flags
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime updated
 */
/*ARGSUSED*/
int
zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
    struct inode **ipp, cred_t *cr, int flags)
{
	znode_t		*zp, *dzp = ITOZ(dip);
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	zfs_sb_t	*zsb = ITOZSB(dip);
	zilog_t		*zilog;
	uint64_t	len = strlen(link);
	int		error;
	int		zflg = ZNEW;
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	uint64_t	txtype = TX_SYMLINK;

	ASSERT(S_ISLNK(vap->va_mode));

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);
	zilog = zsb->z_log;

	if (zsb->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zsb);
		return (EILSEQ);
	}
	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;

	if (len > MAXPATHLEN) {
		ZFS_EXIT(zsb);
		return (ENAMETOOLONG);
	}

	if ((error = zfs_acl_ids_create(dzp, 0,
	    vap, cr, NULL, &acl_ids)) != 0) {
		ZFS_EXIT(zsb);
		return (error);
	}
top:
	*ipp = NULL;

	/*
	 * Attempt to lock directory; fail if entry already exists.
	 */
	error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
	if (error) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zsb);
		return (error);
	}

	if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zsb);
		return (error);
	}

	if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zsb);
		return (EDQUOT);
	}
	tx = dmu_tx_create(zsb->z_os);
	fuid_dirtied = zsb->z_fuid_dirty;
	dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE + len);
	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
	if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zsb);
		return (error);
	}

	/*
	 * Create a new object for the symlink.
	 * for version 4 ZPL datasets the symlink will be an SA attribute
	 */
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);

	mutex_enter(&zp->z_lock);
	if (zp->z_is_sa)
		error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb),
		    link, len, tx);
	else
		zfs_sa_symlink(zp, link, len, tx);
	mutex_exit(&zp->z_lock);

	zp->z_size = len;
	(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
	    &zp->z_size, sizeof (zp->z_size), tx);
	/*
	 * Insert the new object into the directory.
	 */
	(void) zfs_link_create(dl, zp, tx, ZNEW);

	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);

	zfs_acl_ids_free(&acl_ids);

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	*ipp = ZTOI(zp);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_symlink);
/*
 * Return, in the buffer contained in the provided uio structure,
 * the symbolic path referred to by ip.
 *
 *	IN:	dentry	- dentry of symbolic link.
 *		nd	- namedata for symlink
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - atime updated
 */
/*ARGSUSED*/
int
zfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct inode	*ip = dentry->d_inode;
	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);
	struct iovec	iov;
	uio_t		uio;
	int		error;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	iov.iov_len = MAXPATHLEN + 1;
	iov.iov_base = kmem_zalloc(iov.iov_len, KM_SLEEP);

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = iov.iov_len;
	uio.uio_segflg = UIO_SYSSPACE;

	mutex_enter(&zp->z_lock);
	if (zp->z_is_sa)
		error = sa_lookup_uio(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb), &uio);
	else
		error = zfs_sa_readlink(zp, &uio);
	mutex_exit(&zp->z_lock);

	ZFS_ACCESSTIME_STAMP(zsb, zp);
	zfs_inode_update(zp);
	ZFS_EXIT(zsb);

	if (error) {
		kmem_free(iov.iov_base, iov.iov_len);
		nd_set_link(nd, ERR_PTR(error));
	} else {
		nd_set_link(nd, iov.iov_base);
	}

	return (error);
}
EXPORT_SYMBOL(zfs_follow_link);
/*
 * Insert a new entry into directory tdip referencing sip.
 *
 *	IN:	tdip	- Directory to contain new entry.
 *		sip	- inode of new entry.
 *		name	- name of new entry.
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	tdip - ctime|mtime updated
 *	 sip - ctime updated
 */
/* ARGSUSED */
int
zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr)
{
	znode_t		*dzp = ITOZ(tdip);
	znode_t		*tzp, *szp;
	zfs_sb_t	*zsb = ITOZSB(tdip);
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	int		zf = ZNEW;
	uint64_t	parent;
	uid_t		owner;

	ASSERT(S_ISDIR(tdip->i_mode));

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(dzp);
	zilog = zsb->z_log;

	/*
	 * POSIX dictates that we return EPERM here.
	 * Better choices include ENOTSUP or EISDIR.
	 */
	if (S_ISDIR(sip->i_mode)) {
		ZFS_EXIT(zsb);
		return (EPERM);
	}

	if (sip->i_sb != tdip->i_sb) {
		ZFS_EXIT(zsb);
		return (EXDEV);
	}

	szp = ITOZ(sip);
	ZFS_VERIFY_ZP(szp);

	/* Prevent links to .zfs/shares files */

	if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zsb),
	    &parent, sizeof (uint64_t))) != 0) {
		ZFS_EXIT(zsb);
		return (error);
	}
	if (parent == zsb->z_shares_dir) {
		ZFS_EXIT(zsb);
		return (EPERM);
	}

	if (zsb->z_utf8 && u8_validate(name,
	    strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zsb);
		return (EILSEQ);
	}
#ifdef HAVE_PN_UTILS
	if (flags & FIGNORECASE)
		zf |= ZCILOOK;
#endif /* HAVE_PN_UTILS */

	/*
	 * We do not support links between attributes and non-attributes
	 * because of the potential security risk of creating links
	 * into "normal" file space in order to circumvent restrictions
	 * imposed in attribute space.
	 */
	if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	owner = zfs_fuid_map_id(zsb, szp->z_uid, cr, ZFS_OWNER);
	if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
		ZFS_EXIT(zsb);
		return (EPERM);
	}

	if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
		ZFS_EXIT(zsb);
		return (error);
	}

top:
	/*
	 * Attempt to lock directory; fail if entry already exists.
	 */
	error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
	if (error) {
		ZFS_EXIT(zsb);
		return (error);
	}

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
	zfs_sa_upgrade_txholds(tx, szp);
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		ZFS_EXIT(zsb);
		return (error);
	}

	error = zfs_link_create(dl, szp, tx, 0);

	if (error == 0) {
		uint64_t txtype = TX_LINK;
#ifdef HAVE_PN_UTILS
		if (flags & FIGNORECASE)
			txtype |= TX_CI;
#endif /* HAVE_PN_UTILS */
		zfs_log_link(zilog, tx, txtype, dzp, szp, name);
	}

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	zfs_inode_update(dzp);
	zfs_inode_update(szp);
	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_link);
/*
 * zfs_null_putapage() is used when the file system has been force
 * unmounted.  It just drops the pages.
 */
/* ARGSUSED */
static int
zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
    size_t *lenp, int flags, cred_t *cr)
{
	pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
	return (0);
}
/*
 * Push a page out to disk, klustering if possible.
 *
 *	IN:	vp	- file to push page to.
 *		pp	- page to push.
 *		flags	- additional flags.
 *		cr	- credentials of caller.
 *
 *	OUT:	offp	- start of range pushed.
 *		lenp	- len of range pushed.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * NOTE: callers must have locked the page to be pushed.  On
 * exit, the page (and all other pages in the kluster) must be
 * unlocked.
 */
/* ARGSUSED */
static int
zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
    size_t *lenp, int flags, cred_t *cr)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	dmu_tx_t	*tx;
	u_offset_t	off, koff;
	size_t		len, klen;
	int		err;

	off = pp->p_offset;
	len = PAGESIZE;
	/*
	 * If our blocksize is bigger than the page size, try to kluster
	 * multiple pages so that we write a full block (thus avoiding
	 * a read-modify-write).
	 */
	if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
		klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
		koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;
		ASSERT(koff <= zp->z_size);
		if (koff + klen > zp->z_size)
			klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
		pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
	}
	ASSERT3U(btop(len), ==, btopr(len));

	/*
	 * Can't push pages past end-of-file.
	 */
	if (off >= zp->z_size) {
		/* ignore all pages */
		err = 0;
		goto out;
	} else if (off + len > zp->z_size) {
		int npages = btopr(zp->z_size - off);
		page_t *trunc;

		page_list_break(&pp, &trunc, npages);
		/* ignore pages past end of file */
		if (trunc)
			pvn_write_done(trunc, flags);
		len = zp->z_size - off;
	}

	if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
	    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
		err = EDQUOT;
		goto out;
	}
top:
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_write(tx, zp->z_id, off, len);

	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	err = dmu_tx_assign(tx, TXG_NOWAIT);
	if (err != 0) {
		if (err == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		goto out;
	}

	if (zp->z_blksz <= PAGESIZE) {
		caddr_t va = zfs_map_page(pp, S_READ);
		ASSERT3U(len, <=, PAGESIZE);
		dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
		zfs_unmap_page(pp, va);
	} else {
		err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
	}

	if (err == 0) {
		uint64_t mtime[2], ctime[2];
		sa_bulk_attr_t bulk[3];
		int count = 0;

		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
		    &mtime, 16);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    &ctime, 16);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
		    &zp->z_pflags, 8);
		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
		    B_TRUE);
		zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
	}
	dmu_tx_commit(tx);

out:
	pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
	if (offp)
		*offp = off;
	if (lenp)
		*lenp = len;

	return (err);
}
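/*
 * Illustrative sketch (compiled out): the kluster alignment arithmetic
 * above, as a runnable user-space example.  With an 8K block size and 4K
 * pages, a dirty page at offset 20K maps to the kluster [16K, 24K), so
 * both pages of that block are written together and the read-modify-write
 * is avoided.  The macros below mirror the P2* macros for power-of-2
 * sizes; the names are local to this sketch.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	P2ALIGN_X(x, align)	((x) & -(align))
#define	P2ROUNDUP_X(x, align)	(-(-(x) & -(align)))

int
main(void)
{
	uint64_t pagesize = 4096, blksz = 8192, off = 20480;
	uint64_t klen = P2ROUNDUP_X(blksz, pagesize);
	uint64_t koff = P2ALIGN_X(off, klen);

	/* prints: kluster [16384, 24576) */
	printf("kluster [%llu, %llu)\n",
	    (unsigned long long)koff, (unsigned long long)(koff + klen));
	return (0);
}
#endif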
/*
 * Copy the portion of the file indicated from pages into the file.
 * The pages are stored in a page list attached to the file's vnode.
 *
 *	IN:	vp	- vnode of file to push page data to.
 *		off	- position in file to put data.
 *		len	- amount of data to write.
 *		flags	- flags to control the operation.
 *		cr	- credentials of caller.
 *		ct	- caller context.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	vp - ctime|mtime updated
 */
/*ARGSUSED*/
static int
zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
    caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	page_t		*pp;
	size_t		io_len;
	u_offset_t	io_off;
	uint_t		blksz;
	rl_t		*rl;
	int		error = 0;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/*
	 * Align this request to the file block size in case we kluster.
	 * XXX - this can result in pretty aggressive locking, which can
	 * impact simultaneous read/write access.  One option might be
	 * to break up long requests (len == 0) into block-by-block
	 * operations to get narrower locking.
	 */
	blksz = zp->z_blksz;
	if (ISP2(blksz))
		io_off = P2ALIGN_TYPED(off, blksz, u_offset_t);
	else
		io_off = 0;
	if (len > 0 && ISP2(blksz))
		io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);
	else
		io_len = 0;

	if (io_len == 0) {
		/*
		 * Search the entire vp list for pages >= io_off.
		 */
		rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
		error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);
		goto out;
	}
	rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);

	if (off > zp->z_size) {
		/* past end of file */
		zfs_range_unlock(rl);
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);

	for (off = io_off; io_off < off + len; io_off += io_len) {
		if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
			pp = page_lookup(vp, io_off,
			    (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
		} else {
			pp = page_lookup_nowait(vp, io_off,
			    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
		}

		if (pp != NULL && pvn_getdirty(pp, flags)) {
			int err;

			/*
			 * Found a dirty page to push
			 */
			err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
			if (err)
				error = err;
		} else {
			io_len = PAGESIZE;
		}
	}
out:
	zfs_range_unlock(rl);
	if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zfsvfs->z_log, zp->z_id);
	ZFS_EXIT(zfsvfs);
	return (error);
}
/*ARGSUSED*/
void
zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	int		error;

	rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
	if (zp->z_sa_hdl == NULL) {
		/*
		 * The fs has been unmounted, or we did a
		 * suspend/resume and this file no longer exists.
		 */
		if (vn_has_cached_data(vp)) {
			(void) pvn_vplist_dirty(vp, 0, zfs_null_putapage,
			    B_INVAL, cr);
		}

		mutex_enter(&zp->z_lock);
		mutex_enter(&vp->v_lock);
		ASSERT(vp->v_count == 1);
		vp->v_count = 0;
		mutex_exit(&vp->v_lock);
		mutex_exit(&zp->z_lock);
		rw_exit(&zfsvfs->z_teardown_inactive_lock);
		zfs_znode_free(zp);
		return;
	}

	/*
	 * Attempt to push any data in the page cache.  If this fails
	 * we will get kicked out later in zfs_zinactive().
	 */
	if (vn_has_cached_data(vp)) {
		(void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC,
		    cr);
	}

	if (zp->z_atime_dirty && zp->z_unlinked == 0) {
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
		} else {
			mutex_enter(&zp->z_lock);
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
			    (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
			zp->z_atime_dirty = 0;
			mutex_exit(&zp->z_lock);
			dmu_tx_commit(tx);
		}
	}

	zfs_zinactive(zp);
	rw_exit(&zfsvfs->z_teardown_inactive_lock);
}
EXPORT_SYMBOL(zfs_inactive);
/*
 * Bounds-check the seek operation.
 *
 *	IN:	ip	- inode seeking within
 *		ooff	- old file offset
 *		noffp	- pointer to new file offset
 *		ct	- caller context
 *
 *	RETURN:	0 if success
 *		EINVAL if new offset invalid
 */
/* ARGSUSED */
int
zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp,
    caller_context_t *ct)
{
	if (S_ISDIR(ip->i_mode))
		return (0);
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
EXPORT_SYMBOL(zfs_seek);
/*
 * Pre-filter the generic locking function to trap attempts to place
 * a mandatory lock on a memory mapped file.
 */
static int
zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
    flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/*
	 * We are following the UFS semantics with respect to mapcnt
	 * here: If we see that the file is mapped already, then we will
	 * return an error, but we don't worry about races between this
	 * function and zfs_map().
	 */
	if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
		ZFS_EXIT(zfsvfs);
		return (EAGAIN);
	}
	ZFS_EXIT(zfsvfs);
	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
}
/*
 * If we can't find a page in the cache, we will create a new page
 * and fill it with file data.  For efficiency, we may try to fill
 * multiple pages at once (klustering) to fill up the supplied page
 * list.  Note that the pages to be filled are held with an exclusive
 * lock to prevent access by other threads while they are being filled.
 */
static int
zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
    caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
{
	znode_t		*zp = VTOZ(vp);
	page_t		*pp, *cur_pp;
	objset_t	*os = zp->z_zfsvfs->z_os;
	u_offset_t	io_off, total;
	size_t		io_len;
	int		err;

	if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
		/*
		 * We only have a single page, don't bother klustering
		 */
		io_off = off;
		io_len = PAGESIZE;
		pp = page_create_va(vp, io_off, io_len,
		    PG_EXCL | PG_WAIT, seg, addr);
	} else {
		/*
		 * Try to find enough pages to fill the page list
		 */
		pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
		    &io_len, off, plsz, 0);
	}
	if (pp == NULL) {
		/*
		 * The page already exists, nothing to do here.
		 */
		*pl = NULL;
		return (0);
	}

	/*
	 * Fill the pages in the kluster.
	 */
	cur_pp = pp;
	for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
		caddr_t va;

		ASSERT3U(io_off, ==, cur_pp->p_offset);
		va = zfs_map_page(cur_pp, S_WRITE);
		err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
		    DMU_READ_PREFETCH);
		zfs_unmap_page(cur_pp, va);
		if (err) {
			/* On error, toss the entire kluster */
			pvn_read_done(pp, B_ERROR);
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = EIO;
			return (err);
		}
		cur_pp = cur_pp->p_next;
	}

	/*
	 * Fill in the page list array from the kluster starting
	 * from the desired offset `off'.
	 * NOTE: the page list will always be null terminated.
	 */
	pvn_plist_init(pp, pl, plsz, off, io_len, rw);
	ASSERT(pl == NULL || (*pl)->p_offset == off);

	return (0);
}
/*
 * Return pointers to the pages for the file region [off, off + len]
 * in the pl array.  If plsz is greater than len, this function may
 * also return page pointers from after the specified region
 * (i.e. the region [off, off + plsz]).  These additional pages are
 * only returned if they are already in the cache, or were created as
 * part of a klustered read.
 *
 *	IN:	vp	- vnode of file to get data from.
 *		off	- position in file to get data from.
 *		len	- amount of data to retrieve.
 *		plsz	- length of provided page list.
 *		seg	- segment to obtain pages for.
 *		addr	- virtual address of fault.
 *		rw	- mode of created pages.
 *		cr	- credentials of caller.
 *		ct	- caller context.
 *
 *	OUT:	protp	- protection mode of created pages.
 *		pl	- list of pages created.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	vp - atime updated
 */
/* ARGSUSED */
static int
zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
    page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
    enum seg_rw rw, cred_t *cr, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	page_t		**pl0 = pl;
	int		err = 0;

	/* we do our own caching, faultahead is unnecessary */
	if (pl == NULL)
		return (0);
	else if (len > plsz)
		len = plsz;
	else
		len = P2ROUNDUP(len, PAGESIZE);
	ASSERT(plsz >= len);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (protp)
		*protp = PROT_ALL;

	/*
	 * Loop through the requested range [off, off + len) looking
	 * for pages.  If we don't find a page, we will need to create
	 * a new page and fill it with data from the file.
	 */
	while (len > 0) {
		if (*pl = page_lookup(vp, off, SE_SHARED))
			*(pl+1) = NULL;
		else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw))
			goto out;
		while (*pl) {
			ASSERT3U((*pl)->p_offset, ==, off);
			off += PAGESIZE;
			addr += PAGESIZE;
			if (len > 0) {
				ASSERT3U(len, >=, PAGESIZE);
				len -= PAGESIZE;
			}
			ASSERT3U(plsz, >=, PAGESIZE);
			plsz -= PAGESIZE;
			pl++;
		}
	}

	/*
	 * Fill out the page array with any pages already in the cache.
	 */
	while (plsz > 0 &&
	    (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) {
		off += PAGESIZE;
		plsz -= PAGESIZE;
	}
out:
	if (err) {
		/*
		 * Release any pages we have previously locked.
		 */
		while (pl > pl0)
			page_unlock(*--pl);
	} else {
		ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
	}

	*pl = NULL;

	ZFS_EXIT(zfsvfs);
	return (err);
}
/*
 * Request a memory map for a section of a file.  This code interacts
 * with common code and the VM system as follows:
 *
 *	common code calls mmap(), which ends up in smmap_common()
 *
 *	this calls VOP_MAP(), which takes you into (say) zfs
 *
 *	zfs_map() calls as_map(), passing segvn_create() as the callback
 *
 *	segvn_create() creates the new segment and calls VOP_ADDMAP()
 *
 *	zfs_addmap() updates z_mapcnt
 */
/*ARGSUSED*/
static int
zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	segvn_crargs_t	vn_a;
	int		error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((prot & PROT_WRITE) && (zp->z_pflags &
	    (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
		ZFS_EXIT(zfsvfs);
		return (EPERM);
	}

	if ((prot & (PROT_READ | PROT_EXEC)) &&
	    (zp->z_pflags & ZFS_AV_QUARANTINED)) {
		ZFS_EXIT(zfsvfs);
		return (EACCES);
	}

	if (vp->v_flag & VNOMAP) {
		ZFS_EXIT(zfsvfs);
		return (ENOSYS);
	}

	if (off < 0 || len > MAXOFFSET_T - off) {
		ZFS_EXIT(zfsvfs);
		return (ENXIO);
	}

	if (vp->v_type != VREG) {
		ZFS_EXIT(zfsvfs);
		return (ENODEV);
	}

	/*
	 * If file is locked, disallow mapping.
	 */
	if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
		ZFS_EXIT(zfsvfs);
		return (EAGAIN);
	}

	as_rangelock(as);
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	vn_a.vp = vp;
	vn_a.offset = (u_offset_t)off;
	vn_a.type = flags & MAP_TYPE;
	vn_a.prot = prot;
	vn_a.maxprot = maxprot;
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.flags = flags & ~MAP_TYPE;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map(as, *addrp, len, segvn_create, &vn_a);
	as_rangeunlock(as);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/* ARGSUSED */
static int
zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
	uint64_t pages = btopr(len);

	atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
	return (0);
}
/*
 * The reason we push dirty pages as part of zfs_delmap() is so that we get a
 * more accurate mtime for the associated file.  Since we don't have a way of
 * detecting when the data was actually modified, we have to resort to
 * heuristics.  If an explicit msync() is done, then we mark the mtime when the
 * last page is pushed.  The problem occurs when the msync() call is omitted,
 * which is by far the most common case:
 *
 *	open()
 *	mmap()
 *	<modify memory>
 *	munmap()
 *	close()
 *	<time lapse>
 *	putpage() via fsflush
 *
 * If we wait for fsflush to come along, we can have a modification time that
 * is some arbitrary point in the future.  In order to prevent this in the
 * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is
 * torn down.
 */
/* ARGSUSED */
static int
zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
    size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
    caller_context_t *ct)
{
	uint64_t pages = btopr(len);

	ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
	atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);

	if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
	    vn_has_cached_data(vp))
		(void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);

	return (0);
}
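/*
 * Illustrative sketch (compiled out): the z_mapcnt accounting performed by
 * zfs_addmap()/zfs_delmap() above, in user-space form.  btopr() rounds a
 * byte length up to whole pages, so a 10000-byte mapping on a 4K-page
 * system accounts for 3 pages, and delmap subtracts the same count.  The
 * names and page shift below are local to this sketch.
 */
#if 0
#include <stdint.h>

#define	EXAMPLE_PAGESHIFT	12	/* 4K pages, for illustration */

static uint64_t
btopr_example(uint64_t bytes)
{
	return ((bytes + (1ULL << EXAMPLE_PAGESHIFT) - 1) >> EXAMPLE_PAGESHIFT);
}

static void
addmap_example(uint64_t *mapcnt, uint64_t len)
{
	*mapcnt += btopr_example(len);	/* like zfs_addmap() */
}

static void
delmap_example(uint64_t *mapcnt, uint64_t len)
{
	*mapcnt -= btopr_example(len);	/* like zfs_delmap() */
}
#endif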
/*
 * convoff - converts the given data (start, whence) to the
 * given whence.
 */
int
convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
{
	int		error;
	struct kstat	stat;

	if ((lckdat->l_whence == 2) || (whence == 2)) {
		if ((error = zfs_getattr(ip, &stat, 0, CRED())) != 0)
			return (error);
	}

	switch (lckdat->l_whence) {
	case 1:
		lckdat->l_start += offset;
		break;
	case 2:
		lckdat->l_start += stat.size;
		/* FALLTHRU */
	case 0:
		break;
	default:
		return (EINVAL);
	}

	if (lckdat->l_start < 0)
		return (EINVAL);

	switch (whence) {
	case 1:
		lckdat->l_start -= offset;
		break;
	case 2:
		lckdat->l_start -= stat.size;
		/* FALLTHRU */
	case 0:
		break;
	default:
		return (EINVAL);
	}

	lckdat->l_whence = (short)whence;
	return (0);
}
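/*
 * Illustrative sketch (compiled out): what convoff() computes.  A lock
 * range given relative to the current offset (whence 1) with l_start = 100
 * at file offset 500 becomes absolute 600; re-expressing it relative to
 * the start of the file (whence 0) leaves l_start = 600.  The struct below
 * is a hypothetical stand-in for flock64_t.
 */
#if 0
#include <assert.h>

struct lck { long long l_start; short l_whence; };

int
main(void)
{
	struct lck l = { 100, 1 /* relative to current offset */ };
	long long offset = 500, eof = 0;

	/* to absolute: add the base implied by the incoming whence */
	l.l_start += (l.l_whence == 1) ? offset : (l.l_whence == 2) ? eof : 0;
	/* convert to whence 0: nothing to subtract */
	l.l_whence = 0;
	assert(l.l_start == 600);
	return (0);
}
#endif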
/*
 * Free or allocate space in a file.  Currently, this function only
 * supports the `F_FREESP' command.  However, this command is somewhat
 * misnamed, as its functionality includes the ability to allocate as
 * well as free space.
 *
 *	IN:	ip	- inode of file to free data in.
 *		cmd	- action to take (only F_FREESP supported).
 *		bfp	- section of file to free/alloc.
 *		flag	- current file open mode flags.
 *		offset	- current file offset.
 *		cr	- credentials of caller [UNUSED].
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated
 */
/* ARGSUSED */
int
zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
    offset_t offset, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);
	uint64_t	off, len;
	int		error;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	if (cmd != F_FREESP) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	if ((error = convoff(ip, bfp, 0, offset))) {
		ZFS_EXIT(zsb);
		return (error);
	}

	if (bfp->l_len < 0) {
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	off = bfp->l_start;
	len = bfp->l_len; /* 0 means from off to end of file */

	error = zfs_freesp(zp, off, len, flag, TRUE);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_space);
/*ARGSUSED*/
int
zfs_fid(struct inode *ip, fid_t *fidp)
{
	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);
	uint32_t	gen;
	uint64_t	gen64;
	uint64_t	object = zp->z_id;
	zfid_short_t	*zfid;
	int		size, i, error;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb),
	    &gen64, sizeof (uint64_t))) != 0) {
		ZFS_EXIT(zsb);
		return (error);
	}

	gen = (uint32_t)gen64;

	size = (zsb->z_parent != zsb) ? LONG_FID_LEN : SHORT_FID_LEN;
	if (fidp->fid_len < size) {
		fidp->fid_len = size;
		ZFS_EXIT(zsb);
		return (ENOSPC);
	}

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = size;

	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* Must have a non-zero generation number to distinguish from .zfs */
	if (gen == 0)
		gen = 1;
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));

	if (size == LONG_FID_LEN) {
		uint64_t	objsetid = dmu_objset_id(zsb->z_os);
		zfid_long_t	*zlfid;

		zlfid = (zfid_long_t *)fidp;

		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));

		/* XXX - this should be the generation number for the objset */
		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			zlfid->zf_setgen[i] = 0;
	}

	ZFS_EXIT(zsb);
	return (0);
}
EXPORT_SYMBOL(zfs_fid);
/*ARGSUSED*/
int
zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);
	int		error;
	boolean_t	skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);
	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
	ZFS_EXIT(zsb);

	return (error);
}
EXPORT_SYMBOL(zfs_getsecattr);
/*ARGSUSED*/
int
zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);
	int		error;
	boolean_t	skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	zilog_t		*zilog = zsb->z_log;

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);

	error = zfs_setacl(zp, vsecp, skipaclchk, cr);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zsb);
	return (error);
}
EXPORT_SYMBOL(zfs_setsecattr);
#ifdef HAVE_UIO_ZEROCOPY
/*
 * Tunable, both must be a power of 2.
 *
 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
 * zcr_blksz_max: if set to less than the file block size, allow loaning out
 *		of an arcbuf for a partial block read
 */
int zcr_blksz_min = (1 << 10);	/* 1K */
int zcr_blksz_max = (1 << 17);	/* 128K */

/*ARGSUSED*/
static int
zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfs_sb_t	*zsb = ITOZSB(ip);
	int		max_blksz = zsb->z_max_blksz;
	uio_t		*uio = &xuio->xu_uio;
	ssize_t		size = uio->uio_resid;
	offset_t	offset = uio->uio_loffset;
	int		blksz;
	int		fullblk, i;
	arc_buf_t	*abuf;
	ssize_t		maxsize;
	int		preamble, postamble;

	if (xuio->xu_type != UIOTYPE_ZEROCOPY)
		return (EINVAL);

	ZFS_ENTER(zsb);
	ZFS_VERIFY_ZP(zp);
	switch (ioflag) {
	case UIO_WRITE:
		/*
		 * Loan out an arc_buf for write if write size is bigger than
		 * max_blksz, and the file's block size is also max_blksz.
		 */
		blksz = max_blksz;
		if (size < blksz || zp->z_blksz != blksz) {
			ZFS_EXIT(zsb);
			return (EINVAL);
		}
		/*
		 * Caller requests buffers for write before knowing where the
		 * write offset might be (e.g. NFS TCP write).
		 */
		if (offset == -1) {
			preamble = 0;
		} else {
			preamble = P2PHASE(offset, blksz);
			if (preamble) {
				preamble = blksz - preamble;
				size -= preamble;
			}
		}

		postamble = P2PHASE(size, blksz);
		size -= postamble;

		fullblk = size / blksz;
		(void) dmu_xuio_init(xuio,
		    (preamble != 0) + fullblk + (postamble != 0));

		/*
		 * Have to fix iov base/len for partial buffers.  They
		 * currently represent full arc_buf's.
		 */
		if (preamble) {
			/* data begins in the middle of the arc_buf */
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    blksz);
			ASSERT(abuf);
			(void) dmu_xuio_add(xuio, abuf,
			    blksz - preamble, preamble);
		}

		for (i = 0; i < fullblk; i++) {
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    blksz);
			ASSERT(abuf);
			(void) dmu_xuio_add(xuio, abuf, 0, blksz);
		}

		if (postamble) {
			/* data ends in the middle of the arc_buf */
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    blksz);
			ASSERT(abuf);
			(void) dmu_xuio_add(xuio, abuf, 0, postamble);
		}
		break;
	case UIO_READ:
		/*
		 * Loan out an arc_buf for read if the read size is larger than
		 * the current file block size.  Block alignment is not
		 * considered.  Partial arc_buf will be loaned out for read.
		 */
		blksz = zp->z_blksz;
		if (blksz < zcr_blksz_min)
			blksz = zcr_blksz_min;
		if (blksz > zcr_blksz_max)
			blksz = zcr_blksz_max;
		/* avoid potential complexity of dealing with it */
		if (blksz > max_blksz) {
			ZFS_EXIT(zsb);
			return (EINVAL);
		}

		maxsize = zp->z_size - uio->uio_loffset;
		if (size > maxsize)
			size = maxsize;

		if (size < blksz) {
			ZFS_EXIT(zsb);
			return (EINVAL);
		}
		break;
	default:
		ZFS_EXIT(zsb);
		return (EINVAL);
	}

	uio->uio_extflg = UIO_XUIO;
	XUIO_XUZC_RW(xuio) = ioflag;
	ZFS_EXIT(zsb);
	return (0);
}

/*ARGSUSED*/
static int
zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
{
	int		i;
	arc_buf_t	*abuf;
	int		ioflag = XUIO_XUZC_RW(xuio);

	ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);

	i = dmu_xuio_cnt(xuio);
	while (i-- > 0) {
		abuf = dmu_xuio_arcbuf(xuio, i);
		/*
		 * if abuf == NULL, it must be a write buffer
		 * that has been returned in zfs_write().
		 */
		if (abuf)
			dmu_return_arcbuf(abuf);
		ASSERT(abuf || ioflag == UIO_WRITE);
	}

	dmu_xuio_fini(xuio);
	return (0);
}
#endif /* HAVE_UIO_ZEROCOPY */
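/*
 * Illustrative sketch (compiled out): the preamble/fullblk/postamble
 * partition computed by zfs_reqzcbuf() for a zero-copy write.  With a
 * 128K block size, a 300K write starting 8K into a block splits into a
 * 120K preamble (up to the next block boundary), one full 128K block, and
 * a 52K postamble, i.e. three loaned arc_bufs.  The macro below mirrors
 * P2PHASE for power-of-2 sizes; the names are local to this sketch.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	P2PHASE_X(x, align)	((x) & ((align) - 1))

int
main(void)
{
	int64_t blksz = 131072, offset = 8192, size = 307200;
	int64_t preamble = P2PHASE_X(offset, blksz);
	int64_t postamble, fullblk;

	if (preamble) {
		preamble = blksz - preamble;	/* bytes to next boundary */
		size -= preamble;
	}
	postamble = P2PHASE_X(size, blksz);
	size -= postamble;
	fullblk = size / blksz;
	/* prints: bufs = 3 */
	printf("bufs = %lld\n",
	    (long long)((preamble != 0) + fullblk + (postamble != 0)));
	return (0);
}
#endif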