 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs_opreg.h>
#include <sys/taskq.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include "fs/fs_subr.h"
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
/*
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 *  (1)	A check must be made in each zfs thread for a mounted file system.
 *	This is done avoiding races using ZFS_ENTER(zsb).
 *	A ZFS_EXIT(zsb) is needed before all returns.  Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *	can return EIO from the calling function.
 *
 *  (2)	iput() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT(). This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory.  Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call iput() within a tx then use iput_ASYNC().
 *
 *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4)	Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
 *	This is critical because we don't want to block while holding locks.
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing to
 *	use a non-blocking assign can deadlock the system.  The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zsb->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again.
 *
 *  (5)	If the operation succeeded, generate the intent log entry for it
 *	before dropping locks.  This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 *  (6)	At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7)	After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zsb);			// exit if unmounted
 *	ZFS_VERIFY_ZP(zp);		// fail if znode is stale
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may igrab())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);	// try to assign
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		iput(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zsb);		// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	iput(...);			// release held vnodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	ZFS_EXIT(zsb);			// finished in zfs
 *	return (error);			// done, report error
 */
/*
 * Virus scanning is unsupported.  It would be possible to add a hook
 * here to perform the required virus scan.  This could be done
 * entirely in the kernel or potentially as an update to invoke a
 */
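/*
 * Illustrative only -- not part of this file: a hedged sketch of what such
 * a hook could look like.  The zpl_vscan_hook name and its registration are
 * hypothetical, not an existing API; zfs_vscan() below is the real stub
 * consulted by zfs_open()/zfs_close().
 *
 *	static int (*zpl_vscan_hook)(struct inode *ip, cred_t *cr, int async);
 *
 *	static int
 *	zfs_vscan(struct inode *ip, cred_t *cr, int async)
 *	{
 *		if (zpl_vscan_hook != NULL)
 *			return (zpl_vscan_hook(ip, cr, async));
 *		return (0);
 *	}
 */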
zfs_vscan(struct inode *ip, cred_t *cr, int async)
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	/* Honor ZFS_APPENDONLY file attribute */
	if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
	    ((flag & O_APPEND) == 0)) {

	/* Virus scan eligible files on open */
	if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
		if (zfs_vscan(ip, cr, 0) != 0) {

	/* Keep a count of the synchronous opens in the znode */
	atomic_inc_32(&zp->z_sync_cnt);

EXPORT_SYMBOL(zfs_open);
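/*
 * Note: the z_sync_cnt bumped above tracks synchronous opens; zfs_close()
 * below simply zeroes it again, since (as its comment explains) the Linux
 * close hook only fires once, when the last reference to the file is
 * dropped.
 */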
zfs_close(struct inode *ip, int flag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	/*
	 * Zero the synchronous opens in the znode.  Under Linux the
	 * zfs_close() hook is not symmetric with zfs_open(), it is
	 * only called once when the last reference is dropped.
	 */
	if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
		VERIFY(zfs_vscan(ip, cr, 1) == 0);

EXPORT_SYMBOL(zfs_close);
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Write:	If we find a memory mapped page, we write to *both*
 *		the page and the dmu buffer.
 */
update_pages(struct inode *ip, int64_t start, int len,
    objset_t *os, uint64_t oid)
{
	struct address_space *mp = ip->i_mapping;

	off = start & (PAGE_CACHE_SIZE-1);
	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
		nbytes = MIN(PAGE_CACHE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
		if (mapping_writably_mapped(mp))
			flush_dcache_page(pp);

		(void) dmu_read(os, oid, start+off, nbytes, pb+off,
		if (mapping_writably_mapped(mp))
			flush_dcache_page(pp);

		mark_page_accessed(pp);
		page_cache_release(pp);
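/*
 * update_pages() above is the write-side half of this synchronization: it
 * is invoked from zfs_write() below whenever the znode is mmap()ed
 * (zp->z_is_mapped) and the write is not O_DIRECT, so that mapped pages
 * and the DMU buffer carry the same data.
 */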
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		else we default from the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *	the file is memory mapped.
 */
mappedread(struct inode *ip, int nbytes, uio_t *uio)
{
	struct address_space *mp = ip->i_mapping;
	znode_t	*zp = ITOZ(ip);
	objset_t *os = ITOZSB(ip)->z_os;

	start = uio->uio_loffset;
	off = start & (PAGE_CACHE_SIZE-1);
	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
		bytes = MIN(PAGE_CACHE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
			ASSERT(PageUptodate(pp));

			error = uiomove(pb + off, bytes, UIO_READ, uio);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			page_cache_release(pp);
			error = dmu_read_uio(os, zp->z_id, uio, bytes);
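/*
 * mappedread() above is the read-side counterpart: zfs_read() below calls
 * it instead of dmu_read_uio() when zp->z_is_mapped is set, so reads prefer
 * whatever data is sitting in the mapped page cache and only fall back to
 * the DMU buffer for pages that are not cached.
 */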
unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
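/*
 * zfs_read() below breaks each request into chunks of at most
 * zfs_read_chunk_size bytes (see the MIN()/P2PHASE() calculation in its
 * main loop).  In the Linux port this tunable is typically also exposed as
 * a module parameter, so the 1 MiB default can be adjusted at runtime.
 */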
/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	ip	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *		ioflag	- FSYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	inode - atime updated if byte count > 0
 */
zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
#ifdef HAVE_UIO_ZEROCOPY
#endif /* HAVE_UIO_ZEROCOPY */

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {

	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {

	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {

	/*
	 * Check for mandatory locks
	 */
	if (mandatory_lock(ip) &&
	    !lock_may_read(ip, uio->uio_loffset, uio->uio_resid)) {

	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 */
	if (ioflag & FRSYNC || zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zsb->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {

	ASSERT(uio->uio_loffset < zp->z_size);
	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);

#ifdef HAVE_UIO_ZEROCOPY
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
		int blksz = zp->z_blksz;
		uint64_t offset = uio->uio_loffset;

		xuio = (xuio_t *)uio;
			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
			ASSERT(offset + n <= blksz);

		(void) dmu_xuio_init(xuio, nblk);

		if (vn_has_cached_data(ip)) {
			/*
			 * For simplicity, we always allocate a full buffer
			 * even if we only expect to read a portion of a block.
			 */
			while (--nblk >= 0) {
				(void) dmu_xuio_add(xuio,
				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
#endif /* HAVE_UIO_ZEROCOPY */

		nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

		if (zp->z_is_mapped && !(ioflag & O_DIRECT))
			error = mappedread(ip, nbytes, uio);
			error = dmu_read_uio(os, zp->z_id, uio, nbytes);

			/* convert checksum errors into IO errors */

	zfs_range_unlock(rl);

	ZFS_ACCESSTIME_STAMP(zsb, zp);
	zfs_inode_update(zp);

EXPORT_SYMBOL(zfs_read);
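/*
 * The write path below follows the "Big Rules" laid out at the top of this
 * file: take the range lock before dmu_tx_assign(TXG_NOWAIT), retry via
 * dmu_tx_wait() on ERESTART, generate the ZIL entry with zfs_log_write()
 * before the locks are dropped, and only call zil_commit() at the very end
 * for FSYNC/FDSYNC or sync=always datasets.
 */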
/*
 * Write the bytes to a file.
 *
 *	IN:	ip	- inode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *		ioflag	- FAPPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	ip - ctime|mtime updated if byte count > 0
 */
zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	rlim64_t limit = uio->uio_limit;
	ssize_t	start_resid = uio->uio_resid;
	zfs_sb_t *zsb = ZTOZSB(zp);
	int max_blksz = zsb->z_max_blksz;
	iovec_t	*aiov = NULL;
	iovec_t	*iovp = uio->uio_iov;
	sa_bulk_attr_t	bulk[4];
	uint64_t mtime[2], ctime[2];
	ASSERTV(int iovcnt = uio->uio_iovcnt);

	/*
	 * Fasttrack empty write
	 */
	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,

	/*
	 * If immutable or not appending then return EPERM
	 */
	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
	    (uio->uio_loffset < zp->z_size))) {

	/*
	 * Validate file offset
	 */
	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;

	/*
	 * Check for mandatory locks before calling zfs_range_lock()
	 * in order to prevent a deadlock with locks set via fcntl().
	 */
	if (mandatory_lock(ip) && !lock_may_write(ip, woff, n)) {

#ifdef HAVE_UIO_ZEROCOPY
	/*
	 * Pre-fault the pages to ensure slow (eg NFS) pages
	 * Skip this if uio contains loaned arc_buf.
	 */
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
		xuio = (xuio_t *)uio;
		uio_prefaultpages(MIN(n, max_blksz), uio);
#endif /* HAVE_UIO_ZEROCOPY */

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	if (ioflag & FAPPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics.  We reset the write offset once we have the lock.
		 */
		rl = zfs_range_lock(zp, 0, n, RL_APPEND);
		if (rl->r_len == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
		uio->uio_loffset = woff;
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		rl = zfs_range_lock(zp, woff, n, RL_WRITER);

		zfs_range_unlock(rl);

	if ((woff + n) > limit || woff > (limit - n))

	/* Will this write extend the file length? */
	write_eof = (woff + n > zp->z_size);

	end_size = MAX(zp->z_size, woff + n);

	/*
	 * Write the file in reasonable size chunks.  Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
		woff = uio->uio_loffset;

		if (zfs_owner_overquota(zsb, zp, B_FALSE) ||
		    zfs_owner_overquota(zsb, zp, B_TRUE)) {
				dmu_return_arcbuf(abuf);

		if (xuio && abuf == NULL) {
			ASSERT(i_iov < iovcnt);
			abuf = dmu_xuio_arcbuf(xuio, i_iov);
			dmu_xuio_clear(xuio, i_iov);
			ASSERT((aiov->iov_base == abuf->b_data) ||
			    ((char *)aiov->iov_base - (char *)abuf->b_data +
			    aiov->iov_len == arc_buf_size(abuf)));
		} else if (abuf == NULL && n >= max_blksz &&
		    woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block.  "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction.  This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if ((error = uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes))) {
				dmu_return_arcbuf(abuf);
			ASSERT(cbytes == max_blksz);

		/*
		 * Start a transaction.
		 */
		tx = dmu_tx_create(zsb->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_NOWAIT);
			if (error == ERESTART) {
				dmu_return_arcbuf(abuf);

		/*
		 * If zfs_range_lock() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since zfs_range_reduce() will
		 * shrink down r_len to the appropriate size.
		 */
		if (rl->r_len == UINT64_MAX) {
			if (zp->z_blksz > max_blksz) {
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
				new_blksz = MIN(end_size, max_blksz);
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_range_reduce(rl, woff, n);

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

			tx_bytes = uio->uio_resid;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			tx_bytes -= uio->uio_resid;

			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf().  Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {
				dmu_write(zsb->z_os, zp->z_id, woff,
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();
				ASSERT(xuio || tx_bytes == max_blksz);
				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);

		if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT))
			update_pages(ip, woff, tx_bytes, zsb->z_os, zp->z_id);

		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);

		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(cr,
		    (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
			zp->z_mode &= ~(S_ISUID | S_ISGID);
			newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zsb),
			    (void *)&newmode, sizeof (uint64_t), tx);
		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,

		/*
		 * If we are replaying and eof is non zero then force
		 * the file size to the specified eof. Note, there's no
		 * concurrency during replay.
		 */
		if (zsb->z_replay && zsb->z_replay_eof != 0)
			zp->z_size = zsb->z_replay_eof;

		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);

		ASSERT(tx_bytes == nbytes);

			uio_prefaultpages(MIN(n, max_blksz), uio);

	zfs_range_unlock(rl);

	/*
	 * If we're in replay mode, or we made no progress, return error.
	 * Otherwise, it's at least a partial write, so it's successful.
	 */
	if (zsb->z_replay || uio->uio_resid == start_resid) {

	if (ioflag & (FSYNC | FDSYNC) ||
	    zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	zfs_inode_update(zp);

EXPORT_SYMBOL(zfs_write);
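/*
 * iput_async() defers the final iput() to the given taskq.  Its callers
 * below (zfs_get_done() and zfs_get_data()) run with the current txg held
 * open, so dropping the last inode reference -- and the work that
 * zfs_zinactive() can trigger -- must not happen synchronously there; see
 * Big Rule (2) at the top of this file.
 */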
iput_async(struct inode *ip, taskq_t *taskq)
{
	ASSERT(atomic_read(&ip->i_count) > 0);
	if (atomic_read(&ip->i_count) == 1)
		taskq_dispatch(taskq, (task_func_t *)iput, ip, TQ_PUSHPAGE);
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;
	objset_t *os = ZTOZSB(zp)->z_os;

	dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
static int zil_fault_io = 0;
/*
 * Get data to generate a TX_WRITE intent log record.
 */
zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	objset_t *os = zsb->z_os;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zsb, object, &zp) != 0)
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE);
	zgd->zgd_zilog = zsb->z_log;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
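	/*
	 * In other words: the "immediate" branch below copies the file data
	 * into the supplied buf so it travels inside the log record, while
	 * the "indirect" branch uses dmu_sync() to push the block out and
	 * records only the resulting block pointer (lr_blkptr) in the ZIL.
	 */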
	if (buf != NULL) {	/* immediate write */
		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data. We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			zgd->zgd_rl = zfs_range_lock(zp, offset, size,
			if (zp->z_blksz == size)
			zfs_range_unlock(zgd->zgd_rl);

		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)

			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);
			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			ASSERT(error || lr->lr_length <= zp->z_blksz);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf.  We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;

	zfs_get_done(zgd, error);

zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

EXPORT_SYMBOL(zfs_access);

/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held inode reference for it.
 *
 *	IN:	dip	- inode of directory to search.
 *		nm	- name of entry to lookup.
 *		flags	- LOOKUP_XATTR set if looking for an attribute.
 *		cr	- credentials of caller.
 *		direntflags - directory lookup flags
 *		realpnp	- returned pathname.
 *
 *	OUT:	ipp	- inode of located entry, NULL if not found.
 *
 *	RETURN:	0 if success
 *		error code if failure
 */
zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
    cred_t *cr, int *direntflags, pathname_t *realpnp)
{
	znode_t *zdp = ITOZ(dip);
	zfs_sb_t *zsb = ITOZSB(dip);

	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
		if (!S_ISDIR(dip->i_mode)) {
		} else if (zdp->z_sa_hdl == NULL) {

		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
			error = zfs_fastaccesschk_execute(zdp, cr);
			vnode_t *tvp = dnlc_lookup(dvp, nm);
				error = zfs_fastaccesschk_execute(zdp, cr);
				if (tvp == DNLC_NO_VNODE) {
					return (specvp_check(vpp, cr));
#endif /* HAVE_DNLC */

	if (flags & LOOKUP_XATTR) {
		/*
		 * We don't allow recursive attributes..
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {

		if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {

		/*
		 * Do we have permission to get into attribute directory?
		 */
		if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,

	if (!S_ISDIR(dip->i_mode)) {

	/*
	 * Check accessibility of directory.
	 */
	if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {

	if (zsb->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {

	error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
	if ((error == 0) && (*ipp))
		zfs_inode_update(ITOZ(*ipp));

EXPORT_SYMBOL(zfs_lookup);

/*
 * Attempt to create a new entry in a directory.  If the entry
 * already exists, truncate the file if permissible, else return
 * an error.  Return the ip of the created or trunc'd file.
 *
 *	IN:	dip	- inode of directory to put new file entry in.
 *		name	- name of new file entry.
 *		vap	- attributes of new file.
 *		excl	- flag indicating exclusive or non-exclusive mode.
 *		mode	- mode to open file with.
 *		cr	- credentials of caller.
 *		flag	- large file flag [UNUSED].
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created or trunc'd entry.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	dip - ctime|mtime updated if new entry created
 *	 ip - ctime|mtime always, atime if new
 */
zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
	znode_t *zp, *dzp = ITOZ(dip);
	zfs_sb_t *zsb = ITOZSB(dip);
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
	boolean_t have_acl = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	if (zsb->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))

	if (zsb->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {

	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */
		/* possible igrab(zp) */
		if (flag & FIGNORECASE)

		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
			zfs_acl_ids_free(&acl_ids);
			if (strcmp(name, "..") == 0)

		/*
		 * Create a new file object and update the directory
		 */
		if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
				zfs_acl_ids_free(&acl_ids);

		/*
		 * We only support the creation of regular files in
		 * extended attribute directories.
		 */
		if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
				zfs_acl_ids_free(&acl_ids);

		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
		    cr, vsecp, &acl_ids)) != 0)

		if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
			zfs_acl_ids_free(&acl_ids);

		tx = dmu_tx_create(os);
			dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
			    ZFS_SA_BASE_ATTR_SIZE);

		fuid_dirtied = zsb->z_fuid_dirty;
			zfs_fuid_txhold(zsb, tx);
		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		if (!zsb->z_use_sa &&
		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, acl_ids.z_aclp->z_acl_bytes);
		error = dmu_tx_assign(tx, TXG_NOWAIT);
			zfs_dirent_unlock(dl);
			if (error == ERESTART) {
			zfs_acl_ids_free(&acl_ids);
		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

			zfs_fuid_sync(zsb, tx);

		(void) zfs_link_create(dl, zp, tx, ZNEW);
		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
		if (flag & FIGNORECASE)
		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
		    vsecp, acl_ids.z_fuidp, vap);
		zfs_acl_ids_free(&acl_ids);
		int aflags = (flag & FAPPEND) ? V_APPEND : 0;

			zfs_acl_ids_free(&acl_ids);
		/*
		 * A directory entry already exists for this name.
		 */
		/*
		 * Can't truncate an existing file if in exclusive mode.
		 */
		/*
		 * Can't open a directory for writing.
		 */
		if (S_ISDIR(ZTOI(zp)->i_mode)) {
		/*
		 * Verify requested access to file.
		 */
		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {

		mutex_enter(&dzp->z_lock);
		mutex_exit(&dzp->z_lock);

		/*
		 * Truncate regular files if requested.
		 */
		if (S_ISREG(ZTOI(zp)->i_mode) &&
		    (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
			/* we can't hold any locks when calling zfs_freesp() */
			zfs_dirent_unlock(dl);
			error = zfs_freesp(zp, 0, 0, mode, TRUE);

		zfs_dirent_unlock(dl);
		zfs_inode_update(dzp);
		zfs_inode_update(zp);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

EXPORT_SYMBOL(zfs_create);

/*
 * Remove an entry from a directory.
 *
 *	IN:	dip	- inode of directory to remove entry from.
 *		name	- name of entry to remove.
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	ip - ctime (if nlink > 0)
 */
uint64_t null_xattr = 0;

zfs_remove(struct inode *dip, char *name, cred_t *cr)
{
	znode_t *zp, *dzp = ITOZ(dip);
	zfs_sb_t *zsb = ITOZSB(dip);
	uint64_t xattr_obj_unlinked = 0;
	pathname_t *realnmp = NULL;
#ifdef HAVE_PN_UTILS
#endif /* HAVE_PN_UTILS */

#ifdef HAVE_PN_UTILS
	if (flags & FIGNORECASE) {
#endif /* HAVE_PN_UTILS */

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
#ifdef HAVE_PN_UTILS
#endif /* HAVE_PN_UTILS */

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {

	/*
	 * Need to use rmdir for removing directories.
	 */
	if (S_ISDIR(ip->i_mode)) {
		dnlc_remove(dvp, realnmp->pn_buf);
		dnlc_remove(dvp, name);
#endif /* HAVE_DNLC */

	/*
	 * We never delete the znode and always place it in the unlinked
	 * set. The dentry cache will always hold the last reference and
	 * is responsible for safely freeing the znode.
	 */
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);

	/* are there any extended attributes? */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zsb, xattr_obj, &xzp);
		ASSERT3U(error, ==, 0);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);

	/* charge as an update -- would be nice not to charge at all */
	dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);

	error = dmu_tx_assign(tx, TXG_NOWAIT);
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
#ifdef HAVE_PN_UTILS
#endif /* HAVE_PN_UTILS */

	/*
	 * Remove the directory entry.
	 */
	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);

		/*
		 * Hold z_lock so that we can make sure that the ACL obj
		 * hasn't changed. Could have been deleted due to
		 */
		mutex_enter(&zp->z_lock);
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
		mutex_exit(&zp->z_lock);
		zfs_unlinked_add(zp, tx);

#ifdef HAVE_PN_UTILS
	if (flags & FIGNORECASE)
#endif /* HAVE_PN_UTILS */
	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);

#ifdef HAVE_PN_UTILS
#endif /* HAVE_PN_UTILS */

	zfs_dirent_unlock(dl);
	zfs_inode_update(dzp);
	zfs_inode_update(zp);
		zfs_inode_update(xzp);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

EXPORT_SYMBOL(zfs_remove);

/*
 * Create a new directory and insert it into dip using the name
 * provided.  Return a pointer to the inserted directory.
 *
 *	IN:	dip	- inode of directory to add subdir to.
 *		dirname	- name of new directory.
 *		vap	- attributes of new directory.
 *		cr	- credentials of caller.
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created directory.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	dip - ctime|mtime updated
 *	ipp - ctime|mtime|atime updated
 */
zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
    cred_t *cr, int flags, vsecattr_t *vsecp)
{
	znode_t *zp, *dzp = ITOZ(dip);
	zfs_sb_t *zsb = ITOZSB(dip);
	gid_t gid = crgetgid(cr);
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;

	ASSERT(S_ISDIR(vap->va_mode));

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	if (zsb->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))

	if (dzp->z_pflags & ZFS_XATTR) {

	if (zsb->z_utf8 && u8_validate(dirname,
	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
	if (flags & FIGNORECASE)

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {

	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
	    vsecp, &acl_ids)) != 0) {

	/*
	 * First make sure the new directory doesn't exist.
	 *
	 * Existence is checked first to make sure we don't return
	 * EACCES instead of EEXIST which can cause some applications
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
		zfs_acl_ids_free(&acl_ids);

	if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);

	if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);

	/*
	 * Add a new entry to the directory.
	 */
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zsb->z_fuid_dirty;
		zfs_fuid_txhold(zsb, tx);
	if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	error = dmu_tx_assign(tx, TXG_NOWAIT);
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
		zfs_acl_ids_free(&acl_ids);

	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

		zfs_fuid_sync(zsb, tx);

	/*
	 * Now put new name in parent dir.
	 */
	(void) zfs_link_create(dl, zp, tx, ZNEW);

	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
	if (flags & FIGNORECASE)
	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
	    acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);

	zfs_dirent_unlock(dl);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);

EXPORT_SYMBOL(zfs_mkdir);

/*
 * Remove a directory subdir entry.  If the current working
 * directory is the same as the subdir to be removed, the
 * remove will fail.
 *
 *	IN:	dip	- inode of directory to remove from.
 *		name	- name of directory to be removed.
 *		cwd	- inode of current working directory.
 *		cr	- credentials of caller.
 *		flags	- case flags
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	dip - ctime|mtime updated
 */
zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
{
	znode_t *dzp = ITOZ(dip);
	zfs_sb_t *zsb = ITOZSB(dip);

	if (flags & FIGNORECASE)

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {

	if (!S_ISDIR(ip->i_mode)) {

	/*
	 * Grab a lock on the directory to make sure that no one is
	 * trying to add (or lookup) entries while we are removing it.
	 */
	rw_enter(&zp->z_name_lock, RW_WRITER);

	/*
	 * Grab a lock on the parent pointer to make sure we play well
	 * with the treewalk and directory rename code.
	 */
	rw_enter(&zp->z_parent_lock, RW_WRITER);

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
		rw_exit(&zp->z_parent_lock);
		rw_exit(&zp->z_name_lock);
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {

	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);

		uint64_t txtype = TX_RMDIR;
		if (flags & FIGNORECASE)
		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);

	rw_exit(&zp->z_parent_lock);
	rw_exit(&zp->z_name_lock);

	zfs_dirent_unlock(dl);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);

	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

EXPORT_SYMBOL(zfs_rmdir);

/*
 * Read as many directory entries as will fit into the provided
 * dirent buffer from the given directory cursor position.
 *
 *	IN:	ip	- inode of directory to read.
 *		dirent	- buffer for directory entries.
 *
 *	OUT:	dirent	- filler buffer of directory entries.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	ip - atime updated
 *
 * Note that the low 4 bits of the cookie returned by zap is always zero.
 * This allows us to use the low range for "special" directory entries:
 * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
 * we use the offset 2 for the '.zfs' directory.
 */
zfs_readdir(struct inode *ip, void *dirent, filldir_t filldir,
    loff_t *pos, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	zap_attribute_t zap;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zsb),
	    &parent, sizeof (parent))) != 0)

	/*
	 * Quit if directory has been removed (posix)
	 */

	prefetch = zp->z_zn_prefetch;

	/*
	 * Initialize the iterator cursor.
	 */
		/*
		 * Start iteration from the beginning of the directory.
		 */
		zap_cursor_init(&zc, os, zp->z_id);
		/*
		 * The offset is a serialized cursor.
		 */
		zap_cursor_init_serialized(&zc, os, zp->z_id, *pos);

	/*
	 * Transform to file-system independent format
	 */
		/*
		 * Special case `.', `..', and `.zfs'.
		 */
			(void) strcpy(zap.za_name, ".");
			zap.za_normalization_conflict = 0;
		} else if (*pos == 1) {
			(void) strcpy(zap.za_name, "..");
			zap.za_normalization_conflict = 0;
		} else if (*pos == 2 && zfs_show_ctldir(zp)) {
			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
			zap.za_normalization_conflict = 0;
			objnum = ZFSCTL_INO_ROOT;

			if ((error = zap_cursor_retrieve(&zc, &zap))) {
				if (error == ENOENT)

			/*
			 * Allow multiple entries provided the first entry is
			 * the object id.  Non-zpl consumers may safely make
			 * use of the additional space.
			 *
			 * XXX: This should be a feature flag for compatibility
			 */
			if (zap.za_integer_length != 8 ||
			    zap.za_num_integers == 0) {
				cmn_err(CE_WARN, "zap_readdir: bad directory "
				    "entry, obj = %lld, offset = %lld, "
				    "length = %d, num = %lld\n",
				    (u_longlong_t)zp->z_id,
				    zap.za_integer_length,
				    (u_longlong_t)zap.za_num_integers);

			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);

		done = filldir(dirent, zap.za_name, strlen(zap.za_name),
		    zap_cursor_serialize(&zc), objnum, 0);

		/* Prefetch znode */
			dmu_prefetch(os, objnum, 0, 0);

		if (*pos > 2 || (*pos == 2 && !zfs_show_ctldir(zp))) {
			zap_cursor_advance(&zc);
			*pos = zap_cursor_serialize(&zc);

	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */

	zap_cursor_fini(&zc);
	if (error == ENOENT)

	ZFS_ACCESSTIME_STAMP(zsb, zp);
	zfs_inode_update(zp);

EXPORT_SYMBOL(zfs_readdir);

ulong_t zfs_fsync_sync_cnt = 4;

zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zsb->z_os->os_sync != ZFS_SYNC_DISABLED) {
		zil_commit(zsb->z_log, zp->z_id);

EXPORT_SYMBOL(zfs_fsync);

/*
 * Get the requested file attributes and place them in the provided
 * vattr structure.
 *
 *	IN:	ip	- inode of file.
 *		vap	- va_mask identifies requested attributes.
 *			  If ATTR_XVATTR set, then optional attrs are requested
 *		flags	- ATTR_NOACLCHECK (CIFS server context)
 *		cr	- credentials of caller.
 *
 *	OUT:	vap	- attribute values.
 *
 *	RETURN:	0 (always succeeds)
 */
zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	uint64_t mtime[2], ctime[2];
	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
	xoptattr_t *xoap = NULL;
	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	sa_bulk_attr_t bulk[2];

	zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);

	if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {

	/*
	 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
	 * Also, if we are the owner don't bother, since owner should
	 * always be allowed to read basic attributes of file.
	 */
	if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
	    (vap->va_uid != crgetuid(cr))) {
		if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,

	/*
	 * Return all attributes.  It's cheaper to provide the answer
	 * than to determine whether we were asked the question.
	 */
	mutex_enter(&zp->z_lock);
	vap->va_type = vn_mode_to_vtype(zp->z_mode);
	vap->va_mode = zp->z_mode;
	vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
	vap->va_nodeid = zp->z_id;
	if ((zp->z_id == zsb->z_root) && zfs_show_ctldir(zp))
		links = zp->z_links + 1;
		links = zp->z_links;
	vap->va_nlink = MIN(links, ZFS_LINK_MAX);
	vap->va_size = i_size_read(ip);
	vap->va_rdev = ip->i_rdev;
	vap->va_seq = ip->i_generation;

	/*
	 * Add in any requested optional attributes and the create time.
	 * Also set the corresponding bits in the returned attribute bitmap.
	 */
	if ((xoap = xva_getxoptattr(xvap)) != NULL && zsb->z_use_fuids) {
		if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
			xoap->xoa_archive =
			    ((zp->z_pflags & ZFS_ARCHIVE) != 0);
			XVA_SET_RTN(xvap, XAT_ARCHIVE);

		if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
			xoap->xoa_readonly =
			    ((zp->z_pflags & ZFS_READONLY) != 0);
			XVA_SET_RTN(xvap, XAT_READONLY);

		if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
			xoap->xoa_system =
			    ((zp->z_pflags & ZFS_SYSTEM) != 0);
			XVA_SET_RTN(xvap, XAT_SYSTEM);

		if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
			xoap->xoa_hidden =
			    ((zp->z_pflags & ZFS_HIDDEN) != 0);
			XVA_SET_RTN(xvap, XAT_HIDDEN);

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			xoap->xoa_nounlink =
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0);
			XVA_SET_RTN(xvap, XAT_NOUNLINK);

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			xoap->xoa_immutable =
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
			XVA_SET_RTN(xvap, XAT_IMMUTABLE);

		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			xoap->xoa_appendonly =
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0);
			XVA_SET_RTN(xvap, XAT_APPENDONLY);

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			xoap->xoa_nodump =
			    ((zp->z_pflags & ZFS_NODUMP) != 0);
			XVA_SET_RTN(xvap, XAT_NODUMP);

		if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
			xoap->xoa_opaque =
			    ((zp->z_pflags & ZFS_OPAQUE) != 0);
			XVA_SET_RTN(xvap, XAT_OPAQUE);

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			xoap->xoa_av_quarantined =
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
			XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			xoap->xoa_av_modified =
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
			XVA_SET_RTN(xvap, XAT_AV_MODIFIED);

		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
		    S_ISREG(ip->i_mode)) {
			zfs_sa_get_scanstamp(zp, xvap);

		if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
			(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zsb),
			    times, sizeof (times));
			ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
			XVA_SET_RTN(xvap, XAT_CREATETIME);

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
			XVA_SET_RTN(xvap, XAT_REPARSE);
		if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
			xoap->xoa_generation = zp->z_gen;
			XVA_SET_RTN(xvap, XAT_GEN);

		if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
			xoap->xoa_offline =
			    ((zp->z_pflags & ZFS_OFFLINE) != 0);
			XVA_SET_RTN(xvap, XAT_OFFLINE);

		if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
			xoap->xoa_sparse =
			    ((zp->z_pflags & ZFS_SPARSE) != 0);
			XVA_SET_RTN(xvap, XAT_SPARSE);

	ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
	ZFS_TIME_DECODE(&vap->va_mtime, mtime);
	ZFS_TIME_DECODE(&vap->va_ctime, ctime);

	mutex_exit(&zp->z_lock);

	sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);

	if (zp->z_blksz == 0) {
		/*
		 * Block size hasn't been set; suggest maximal I/O transfers.
		 */
		vap->va_blksize = zsb->z_max_blksz;

EXPORT_SYMBOL(zfs_getattr);

/*
 * Get the basic file attributes and place them in the provided kstat
 * structure.  The inode is assumed to be the authoritative source
 * for most of the attributes.  However, the znode currently has the
 * authoritative atime, blksize, and block count.
 *
 *	IN:	ip	- inode of file.
 *
 *	OUT:	sp	- kstat values.
 *
 *	RETURN:	0 (always succeeds)
 */
zfs_getattr_fast(struct inode *ip, struct kstat *sp)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);

	mutex_enter(&zp->z_lock);

	generic_fillattr(ip, sp);
	ZFS_TIME_DECODE(&sp->atime, zp->z_atime);

	sa_object_size(zp->z_sa_hdl, (uint32_t *)&sp->blksize, &sp->blocks);
	if (unlikely(zp->z_blksz == 0)) {
		/*
		 * Block size hasn't been set; suggest maximal I/O transfers.
		 */
		sp->blksize = zsb->z_max_blksz;

	mutex_exit(&zp->z_lock);

EXPORT_SYMBOL(zfs_getattr_fast);

/*
 * Set the file attributes to the values contained in the
 * vattr structure.
 *
 *	IN:	ip	- inode of file to be modified.
 *		vap	- new attribute values.
 *			  If ATTR_XVATTR set, then optional attrs are being set
 *		flags	- ATTR_UTIME set if non-default time values provided.
 *			- ATTR_NOACLCHECK (CIFS context only).
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 *	ip - ctime updated, mtime updated if size changed.
 */
zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ITOZSB(ip);
	xvattr_t *tmpxvattr;
	uint_t mask = vap->va_mask;
	uint64_t new_uid, new_gid;
	uint64_t mtime[2], ctime[2];
	int need_policy = FALSE;
	zfs_fuid_info_t *fuidp = NULL;
	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	boolean_t fuid_dirtied = B_FALSE;
	sa_bulk_attr_t *bulk, *xattr_bulk;
	int count = 0, xattr_count = 0;

	/*
	 * Make sure that if we have ephemeral uid/gid or xvattr specified
	 * that file system is at proper version level
	 */
	if (zsb->z_use_fuids == B_FALSE &&
	    (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
	    ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
	    (mask & ATTR_XVATTR))) {

	if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {

	if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {

	/*
	 * If this is an xvattr_t, then get a pointer to the structure of
	 * optional attributes.  If this is NULL, then we have a vattr_t.
	 */
	xoap = xva_getxoptattr(xvap);

	tmpxvattr = kmem_alloc(sizeof(xvattr_t), KM_SLEEP);
	xva_init(tmpxvattr);

	bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 7, KM_SLEEP);
	xattr_bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 7, KM_SLEEP);

	/*
	 * Immutable files can only alter immutable bit and atime
	 */
	if ((zp->z_pflags & ZFS_IMMUTABLE) &&
	    ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
	    ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {

	if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {

	/*
	 * Verify timestamps don't overflow 32 bits.
	 * ZFS can handle large timestamps, but 32bit syscalls can't
	 * handle times greater than 2039.  This check should be removed
	 * once large timestamps are fully supported.
	 */
	if (mask & (ATTR_ATIME | ATTR_MTIME)) {
		if (((mask & ATTR_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
		    ((mask & ATTR_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {

	/* Can this be moved to before the top label? */
	if (zfs_is_readonly(zsb)) {

	/*
	 * First validate permissions
	 */
	if (mask & ATTR_SIZE) {
		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
		truncate_setsize(ip, vap->va_size);

		/*
		 * XXX - Note, we are not providing any open
		 * mode flags here (like FNDELAY), so we may
		 * block if there are locks present... this
		 * should be addressed in openat().
		 */
		/* XXX - would it be OK to generate a log record here? */
		err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);

	if (mask & (ATTR_ATIME|ATTR_MTIME) ||
	    ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
	    XVA_ISSET_REQ(xvap, XAT_READONLY) ||
	    XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
	    XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
	    XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
	    XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
	    XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
		need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,

	if (mask & (ATTR_UID|ATTR_GID)) {
		int idmask = (mask & (ATTR_UID|ATTR_GID));

		/*
		 * NOTE: even if a new mode is being set,
		 * we may clear S_ISUID/S_ISGID bits.
		 */
		if (!(mask & ATTR_MODE))
			vap->va_mode = zp->z_mode;

		/*
		 * Take ownership or chgrp to group we are a member of
		 */
		take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
		take_group = (mask & ATTR_GID) &&
		    zfs_groupmember(zsb, vap->va_gid, cr);

		/*
		 * If both ATTR_UID and ATTR_GID are set then take_owner and
		 * take_group must both be set in order to allow taking
		 *
		 * Otherwise, send the check through secpolicy_vnode_setattr()
		 */
		if (((idmask == (ATTR_UID|ATTR_GID)) &&
		    take_owner && take_group) ||
		    ((idmask == ATTR_UID) && take_owner) ||
		    ((idmask == ATTR_GID) && take_group)) {
			if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
			    skipaclchk, cr) == 0) {
				/*
				 * Remove setuid/setgid for non-privileged users
				 */
				(void) secpolicy_setid_clear(vap, cr);
				trim_mask = (mask & (ATTR_UID|ATTR_GID));

	mutex_enter(&zp->z_lock);
	oldva.va_mode = zp->z_mode;
	zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
	if (mask & ATTR_XVATTR) {
		/*
		 * Update xvattr mask to include only those attributes
		 * that are actually changing.
		 * the bits will be restored prior to actually setting
		 * the attributes so the caller thinks they were set.
		 */
		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			if (xoap->xoa_appendonly !=
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
				XVA_CLR_REQ(xvap, XAT_APPENDONLY);
				XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			if (xoap->xoa_nounlink !=
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
				XVA_CLR_REQ(xvap, XAT_NOUNLINK);
				XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			if (xoap->xoa_immutable !=
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
				XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
				XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			if (xoap->xoa_nodump !=
			    ((zp->z_pflags & ZFS_NODUMP) != 0)) {
				XVA_CLR_REQ(xvap, XAT_NODUMP);
				XVA_SET_REQ(tmpxvattr, XAT_NODUMP);

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			if (xoap->xoa_av_modified !=
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
				XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
				XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			if ((!S_ISREG(ip->i_mode) &&
			    xoap->xoa_av_quarantined) ||
			    xoap->xoa_av_quarantined !=
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
				XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
				XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			mutex_exit(&zp->z_lock);

		if (need_policy == FALSE &&
		    (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
		    XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {

	mutex_exit(&zp->z_lock);

	if (mask & ATTR_MODE) {
		if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
			err = secpolicy_setid_setsticky_clear(ip, vap,
			trim_mask |= ATTR_MODE;

		/*
		 * If trim_mask is set then take ownership
		 * has been granted or write_acl is present and user
		 * has the ability to modify mode.  In that case remove
		 * UID|GID and or MODE from mask so that
		 * secpolicy_vnode_setattr() doesn't revoke it.
		 */
			saved_mask = vap->va_mask;
			vap->va_mask &= ~trim_mask;
		err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
		    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
			vap->va_mask |= saved_mask;

	/*
	 * secpolicy_vnode_setattr, or take ownership may have
	 */
	mask = vap->va_mask;

	if ((mask & (ATTR_UID | ATTR_GID))) {
		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
		    &xattr_obj, sizeof (xattr_obj));

		if (err == 0 && xattr_obj) {
			err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
		if (mask & ATTR_UID) {
			new_uid = zfs_fuid_create(zsb,
			    (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
			if (new_uid != zp->z_uid &&
			    zfs_fuid_overquota(zsb, B_FALSE, new_uid)) {

		if (mask & ATTR_GID) {
			new_gid = zfs_fuid_create(zsb, (uint64_t)vap->va_gid,
			    cr, ZFS_GROUP, &fuidp);
			if (new_gid != zp->z_gid &&
			    zfs_fuid_overquota(zsb, B_TRUE, new_gid)) {

	tx = dmu_tx_create(zsb->z_os);

	if (mask & ATTR_MODE) {
		uint64_t pmode = zp->z_mode;
		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);

		zfs_acl_chmod_setattr(zp, &aclp, new_mode);

		mutex_enter(&zp->z_lock);
		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
			/*
			 * Are we upgrading ACL from old V0 format
			 */
			if (zsb->z_version >= ZPL_VERSION_FUID &&
			    zfs_znode_acl_version(zp) ==
			    ZFS_ACL_VERSION_INITIAL) {
				dmu_tx_hold_free(tx, acl_obj, 0,
				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
				    0, aclp->z_acl_bytes);
				dmu_tx_hold_write(tx, acl_obj, 0,
		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, aclp->z_acl_bytes);
		mutex_exit(&zp->z_lock);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		if ((mask & ATTR_XVATTR) &&
		    XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);

		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);

	fuid_dirtied = zsb->z_fuid_dirty;
		zfs_fuid_txhold(zsb, tx);

	zfs_sa_upgrade_txholds(tx, zp);

	err = dmu_tx_assign(tx, TXG_NOWAIT);
		if (err == ERESTART)

	/*
	 * Set each attribute requested.
	 * We group settings according to the locks they need to acquire.
	 *
	 * Note: you cannot set ctime directly, although it will be
	 * updated as a side-effect of calling this function.
	 */

	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
		mutex_enter(&zp->z_acl_lock);
	mutex_enter(&zp->z_lock);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));

		if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
			mutex_enter(&attrzp->z_acl_lock);
		mutex_enter(&attrzp->z_lock);
		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
		    SA_ZPL_FLAGS(zsb), NULL, &attrzp->z_pflags,
		    sizeof (attrzp->z_pflags));

	if (mask & (ATTR_UID|ATTR_GID)) {

		if (mask & ATTR_UID) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
			    &new_uid, sizeof (new_uid));
			zp->z_uid = new_uid;
				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_UID(zsb), NULL, &new_uid,
				attrzp->z_uid = new_uid;

		if (mask & ATTR_GID) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb),
			    NULL, &new_gid, sizeof (new_gid));
			zp->z_gid = new_gid;
				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_GID(zsb), NULL, &new_gid,
				attrzp->z_gid = new_gid;

		if (!(mask & ATTR_MODE)) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb),
			    NULL, &new_mode, sizeof (new_mode));
			new_mode = zp->z_mode;
		err = zfs_acl_chown_setattr(zp);
			err = zfs_acl_chown_setattr(attrzp);

	if (mask & ATTR_MODE) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
		    &new_mode, sizeof (new_mode));
		zp->z_mode = new_mode;
		ASSERT3P(aclp, !=, NULL);
		err = zfs_aclset_common(zp, aclp, cr, tx);
		ASSERT3U(err, ==, 0);
		if (zp->z_acl_cached)
			zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = aclp;

	if (mask & ATTR_ATIME) {
		ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
		    &zp->z_atime, sizeof (zp->z_atime));

	if (mask & ATTR_MTIME) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL,
		    mtime, sizeof (mtime));

	/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
	if (mask & ATTR_SIZE && !(mask & ATTR_MTIME)) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb),
		    NULL, mtime, sizeof (mtime));
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
		    &ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
	} else if (mask != 0) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
		    &ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
			SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
			    SA_ZPL_CTIME(zsb), NULL,
			    &ctime, sizeof (ctime));
2877 zfs_tstamp_update_setup(attrzp
, STATE_CHANGED
,
2878 mtime
, ctime
, B_TRUE
);
2882 * Do this after setting timestamps to prevent timestamp
2883 * update from toggling bit
2886 if (xoap
&& (mask
& ATTR_XVATTR
)) {
2889 * restore trimmed off masks
2890 * so that return masks can be set for caller.
2893 if (XVA_ISSET_REQ(tmpxvattr
, XAT_APPENDONLY
)) {
2894 XVA_SET_REQ(xvap
, XAT_APPENDONLY
);
2896 if (XVA_ISSET_REQ(tmpxvattr
, XAT_NOUNLINK
)) {
2897 XVA_SET_REQ(xvap
, XAT_NOUNLINK
);
2899 if (XVA_ISSET_REQ(tmpxvattr
, XAT_IMMUTABLE
)) {
2900 XVA_SET_REQ(xvap
, XAT_IMMUTABLE
);
2902 if (XVA_ISSET_REQ(tmpxvattr
, XAT_NODUMP
)) {
2903 XVA_SET_REQ(xvap
, XAT_NODUMP
);
2905 if (XVA_ISSET_REQ(tmpxvattr
, XAT_AV_MODIFIED
)) {
2906 XVA_SET_REQ(xvap
, XAT_AV_MODIFIED
);
2908 if (XVA_ISSET_REQ(tmpxvattr
, XAT_AV_QUARANTINED
)) {
2909 XVA_SET_REQ(xvap
, XAT_AV_QUARANTINED
);
2912 if (XVA_ISSET_REQ(xvap
, XAT_AV_SCANSTAMP
))
2913 ASSERT(S_ISREG(ip
->i_mode
));
2915 zfs_xvattr_set(zp
, xvap
, tx
);
2919 zfs_fuid_sync(zsb
, tx
);
2922 zfs_log_setattr(zilog
, tx
, TX_SETATTR
, zp
, vap
, mask
, fuidp
);
2924 mutex_exit(&zp
->z_lock
);
2925 if (mask
& (ATTR_UID
|ATTR_GID
|ATTR_MODE
))
2926 mutex_exit(&zp
->z_acl_lock
);
2929 if (mask
& (ATTR_UID
|ATTR_GID
|ATTR_MODE
))
2930 mutex_exit(&attrzp
->z_acl_lock
);
2931 mutex_exit(&attrzp
->z_lock
);
2934 if (err
== 0 && attrzp
) {
2935 err2
= sa_bulk_update(attrzp
->z_sa_hdl
, xattr_bulk
,
2946 zfs_fuid_info_free(fuidp
);
2952 if (err
== ERESTART
)
2955 err2
= sa_bulk_update(zp
->z_sa_hdl
, bulk
, count
, tx
);
2957 zfs_inode_update(zp
);
2961 if (zsb
->z_os
->os_sync
== ZFS_SYNC_ALWAYS
)
2962 zil_commit(zilog
, 0);
2965 kmem_free(xattr_bulk
, sizeof(sa_bulk_attr_t
) * 7);
2966 kmem_free(bulk
, sizeof(sa_bulk_attr_t
) * 7);
2967 kmem_free(tmpxvattr
, sizeof(xvattr_t
));
2971 EXPORT_SYMBOL(zfs_setattr
);
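/*
 * Illustrative outline, not part of the original file: the dmu_tx_assign()
 * error handling used by zfs_setattr() above and by the other update paths
 * in this file.  When TXG_NOWAIT assignment fails with ERESTART, the caller
 * is expected to drop its locks, wait for the next transaction group, and
 * retry from the top; any other error aborts the operation.  The names below
 * are placeholders for whatever holds and work a given operation needs.
 *
 *	top:
 *		tx = dmu_tx_create(zsb->z_os);
 *		... dmu_tx_hold_*() calls ...
 *		err = dmu_tx_assign(tx, TXG_NOWAIT);
 *		if (err) {
 *			if (err == ERESTART) {
 *				dmu_tx_wait(tx);
 *				dmu_tx_abort(tx);
 *				goto top;
 *			}
 *			dmu_tx_abort(tx);
 *			return (err);
 *		}
 *		... modify state ...
 *		dmu_tx_commit(tx);
 */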
2973 typedef struct zfs_zlock {
2974 krwlock_t *zl_rwlock; /* lock we acquired */
2975 znode_t *zl_znode; /* znode we held */
2976 struct zfs_zlock *zl_next; /* next in list */
2980 * Drop locks and release vnodes that were held by zfs_rename_lock().
2983 zfs_rename_unlock(zfs_zlock_t **zlpp)
2987 while ((zl = *zlpp) != NULL) {
2988 if (zl->zl_znode != NULL)
2989 iput(ZTOI(zl->zl_znode));
2990 rw_exit(zl->zl_rwlock);
2991 *zlpp = zl->zl_next;
2992 kmem_free(zl, sizeof (*zl));
2997 * Search back through the directory tree, using the ".." entries.
2998 * Lock each directory in the chain to prevent concurrent renames.
2999 * Fail any attempt to move a directory into one of its own descendants.
3000 * XXX - z_parent_lock can overlap with map or grow locks
3003 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3007 uint64_t rootid = ZTOZSB(zp)->z_root;
3008 uint64_t oidp = zp->z_id;
3009 krwlock_t *rwlp = &szp->z_parent_lock;
3010 krw_t rw = RW_WRITER;
3013 * First pass write-locks szp and compares to zp->z_id.
3014 * Later passes read-lock zp and compare to zp->z_parent.
3017 if (!rw_tryenter(rwlp, rw)) {
3019 * Another thread is renaming in this path.
3020 * Note that if we are a WRITER, we don't have any
3021 * parent_locks held yet.
3023 if (rw == RW_READER && zp->z_id > szp->z_id) {
3025 * Drop our locks and restart
3027 zfs_rename_unlock(&zl);
3031 rwlp = &szp->z_parent_lock;
3036 * Wait for other thread to drop its locks
3042 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3043 zl->zl_rwlock = rwlp;
3044 zl->zl_znode = NULL;
3045 zl->zl_next = *zlpp;
3048 if (oidp == szp->z_id) /* We're a descendant of szp */
3051 if (oidp == rootid) /* We've hit the top */
3054 if (rw == RW_READER) { /* i.e. not the first pass */
3055 int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
3060 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
3061 &oidp, sizeof (oidp));
3062 rwlp = &zp->z_parent_lock;
3065 } while (zp->z_id != sdzp->z_id);
3071 * Move an entry from the provided source directory to the target
3072 * directory. Change the entry name as indicated.
3074 * IN: sdip - Source directory containing the "old entry".
3075 * snm - Old entry name.
3076 * tdip - Target directory to contain the "new entry".
3077 * tnm - New entry name.
3078 * cr - credentials of caller.
3079 * flags - case flags
3081 * RETURN: 0 if success
3082 * error code if failure
3085 * sdip,tdip - ctime|mtime updated
3089 zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
3090 cred_t *cr, int flags)
3092 znode_t *tdzp, *szp, *tzp;
3093 znode_t *sdzp = ITOZ(sdip);
3094 zfs_sb_t *zsb = ITOZSB(sdip);
3096 zfs_dirlock_t *sdl, *tdl;
3099 int cmp, serr, terr;
3104 ZFS_VERIFY_ZP(sdzp);
3107 if (tdip->i_sb != sdip->i_sb) {
3113 ZFS_VERIFY_ZP(tdzp);
3114 if (zsb->z_utf8 && u8_validate(tnm,
3115 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3120 if (flags & FIGNORECASE)
3129 * This is to prevent the creation of links into attribute space
3130 * by renaming a linked file into/out of an attribute directory.
3131 * See the comment in zfs_link() for why this is considered bad.
3133 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3139 * Lock source and target directory entries. To prevent deadlock,
3140 * a lock ordering must be defined. We lock the directory with
3141 * the smallest object id first, or if it's a tie, the one with
3142 * the lexically first name.
3144 if (sdzp->z_id < tdzp->z_id) {
3146 } else if (sdzp->z_id > tdzp->z_id) {
3150 * First compare the two name arguments without
3151 * considering any case folding.
3153 int nofold = (zsb->z_norm & ~U8_TEXTPREP_TOUPPER);
3155 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3156 ASSERT(error == 0 || !zsb->z_utf8);
3159 * POSIX: "If the old argument and the new argument
3160 * both refer to links to the same existing file,
3161 * the rename() function shall return successfully
3162 * and perform no other action."
3168 * If the file system is case-folding, then we may
3169 * have some more checking to do. A case-folding file
3170 * system is either supporting mixed case sensitivity
3171 * access or is completely case-insensitive. Note
3172 * that the file system is always case preserving.
3174 * In mixed sensitivity mode case sensitive behavior
3175 * is the default. FIGNORECASE must be used to
3176 * explicitly request case insensitive behavior.
3178 * If the source and target names provided differ only
3179 * by case (e.g., a request to rename 'tim' to 'Tim'),
3180 * we will treat this as a special case in the
3181 * case-insensitive mode: as long as the source name
3182 * is an exact match, we will allow this to proceed as
3183 * a name-change request.
3185 if ((zsb->z_case == ZFS_CASE_INSENSITIVE ||
3186 (zsb->z_case == ZFS_CASE_MIXED &&
3187 flags & FIGNORECASE)) &&
3188 u8_strcmp(snm, tnm, 0, zsb->z_norm, U8_UNICODE_LATEST,
3191 * case preserving rename request, require exact
3200 * If the source and destination directories are the same, we should
3201 * grab the z_name_lock of that directory only once.
3205 rw_enter(&sdzp->z_name_lock, RW_READER);
3209 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3210 ZEXISTS | zflg, NULL, NULL);
3211 terr = zfs_dirent_lock(&tdl,
3212 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3214 terr = zfs_dirent_lock(&tdl,
3215 tdzp, tnm, &tzp, zflg, NULL, NULL);
3216 serr = zfs_dirent_lock(&sdl,
3217 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3223 * Source entry invalid or not there.
3226 zfs_dirent_unlock(tdl);
3232 rw_exit(&sdzp->z_name_lock);
3234 if (strcmp(snm, "..") == 0)
3240 zfs_dirent_unlock(sdl);
3244 rw_exit(&sdzp->z_name_lock);
3246 if (strcmp(tnm, "..") == 0)
3253 * Must have write access at the source to remove the old entry
3254 * and write access at the target to create the new entry.
3255 * Note that if target and source are the same, this can be
3256 * done in a single check.
3259 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
3262 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3264 * Check to make sure rename is valid.
3265 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3267 if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
3272 * Does target exist?
3276 * Source and target must be the same type.
3278 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3279 if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
3284 if (S_ISDIR(ZTOI(tzp)->i_mode)) {
3290 * POSIX dictates that when the source and target
3291 * entries refer to the same file object, rename
3292 * must do nothing and exit without error.
3294 if (szp->z_id == tzp->z_id) {
3300 tx = dmu_tx_create(zsb->z_os);
3301 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3302 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3303 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3304 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3306 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3307 zfs_sa_upgrade_txholds(tx, tdzp);
3310 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3311 zfs_sa_upgrade_txholds(tx, tzp);
3314 zfs_sa_upgrade_txholds(tx, szp);
3315 dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
3316 error = dmu_tx_assign(tx, TXG_NOWAIT);
3319 zfs_rename_unlock(&zl);
3320 zfs_dirent_unlock(sdl);
3321 zfs_dirent_unlock(tdl);
3324 rw_exit(&sdzp->z_name_lock);
3329 if (error == ERESTART) {
3339 if (tzp) /* Attempt to remove the existing target */
3340 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3343 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3345 szp->z_pflags |= ZFS_AV_MODIFIED;
3347 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zsb),
3348 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3349 ASSERT3U(error, ==, 0);
3351 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3353 zfs_log_rename(zilog, tx, TX_RENAME |
3354 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3355 sdl->dl_name, tdzp, tdl->dl_name, szp);
3358 * At this point, we have successfully created
3359 * the target name, but have failed to remove
3360 * the source name. Since the create was done
3361 * with the ZRENAMING flag, there are
3362 * complications; for one, the link count is
3363 * wrong. The easiest way to deal with this
3364 * is to remove the newly created target, and
3365 * return the original error. This must
3366 * succeed; fortunately, it is very unlikely to
3367 * fail, since we just created it.
3369 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3370 ZRENAMING, NULL), ==, 0);
3378 zfs_rename_unlock(&zl);
3380 zfs_dirent_unlock(sdl);
3381 zfs_dirent_unlock(tdl);
3383 zfs_inode_update(sdzp);
3385 rw_exit(&sdzp->z_name_lock);
3388 zfs_inode_update(tdzp);
3390 zfs_inode_update(szp);
3393 zfs_inode_update(tzp);
3397 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3398 zil_commit(zilog, 0);
3403 EXPORT_SYMBOL(zfs_rename);
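/*
 * Illustrative sketch, not part of the original file: the lock-ordering rule
 * zfs_rename() applies when taking the source and target directory entry
 * locks above.  The hypothetical helper below only expresses the ordering
 * decision (lowest object id first, lexically first name on a tie); the real
 * code compares names with u8_strcmp() using the file system's normalization
 * rather than plain strcmp(), and performs the actual zfs_dirent_lock()
 * calls itself.
 */
static boolean_t
zfs_rename_lock_source_first(znode_t *sdzp, const char *snm,
    znode_t *tdzp, const char *tnm)
{
	if (sdzp->z_id != tdzp->z_id)
		return (sdzp->z_id < tdzp->z_id);
	/* same directory: fall back to the lexically first name */
	return (strcmp(snm, tnm) <= 0);
}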
3406 * Insert the indicated symbolic reference entry into the directory.
3408 * IN: dip - Directory to contain new symbolic link.
3409 * link - Name for new symlink entry.
3410 * vap - Attributes of new entry.
3411 * target - Target path of new symlink.
3413 * cr - credentials of caller.
3414 * flags - case flags
3416 * RETURN: 0 if success
3417 * error code if failure
3420 * dip - ctime|mtime updated
3424 zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
3425 struct inode **ipp, cred_t *cr, int flags)
3427 znode_t *zp, *dzp = ITOZ(dip);
3430 zfs_sb_t *zsb = ITOZSB(dip);
3432 uint64_t len = strlen(link);
3435 zfs_acl_ids_t acl_ids;
3436 boolean_t fuid_dirtied;
3437 uint64_t txtype = TX_SYMLINK;
3439 ASSERT(S_ISLNK(vap->va_mode));
3445 if (zsb->z_utf8 && u8_validate(name, strlen(name),
3446 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3450 if (flags & FIGNORECASE)
3453 if (len > MAXPATHLEN) {
3455 return (ENAMETOOLONG);
3458 if ((error = zfs_acl_ids_create(dzp, 0,
3459 vap, cr, NULL, &acl_ids)) != 0) {
3467 * Attempt to lock directory; fail if entry already exists.
3469 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3471 zfs_acl_ids_free(&acl_ids);
3476 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3477 zfs_acl_ids_free(&acl_ids);
3478 zfs_dirent_unlock(dl);
3483 if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
3484 zfs_acl_ids_free(&acl_ids);
3485 zfs_dirent_unlock(dl);
3489 tx = dmu_tx_create(zsb->z_os);
3490 fuid_dirtied = zsb->z_fuid_dirty;
3491 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3492 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3493 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3494 ZFS_SA_BASE_ATTR_SIZE + len);
3495 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3496 if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3497 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3498 acl_ids.z_aclp->z_acl_bytes);
3501 zfs_fuid_txhold(zsb, tx);
3502 error = dmu_tx_assign(tx, TXG_NOWAIT);
3504 zfs_dirent_unlock(dl);
3505 if (error == ERESTART) {
3510 zfs_acl_ids_free(&acl_ids);
3517 * Create a new object for the symlink.
3518 * for version 4 ZPL datasets the symlink will be an SA attribute
3520 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3523 zfs_fuid_sync(zsb, tx);
3525 mutex_enter(&zp->z_lock);
3527 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb),
3530 zfs_sa_symlink(zp, link, len, tx);
3531 mutex_exit(&zp->z_lock);
3534 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
3535 &zp->z_size, sizeof (zp->z_size), tx);
3537 * Insert the new object into the directory.
3539 (void) zfs_link_create(dl, zp, tx, ZNEW);
3541 if (flags & FIGNORECASE)
3543 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3545 zfs_inode_update(dzp);
3546 zfs_inode_update(zp);
3548 zfs_acl_ids_free(&acl_ids);
3552 zfs_dirent_unlock(dl);
3556 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3557 zil_commit(zilog, 0);
3562 EXPORT_SYMBOL(zfs_symlink);
3565 * Return, in the buffer contained in the provided uio structure,
3566 * the symbolic path referred to by ip.
3568 * IN: ip - inode of symbolic link
3569 * uio - structure to contain the link path.
3570 * cr - credentials of caller.
3572 * RETURN: 0 if success
3573 * error code if failure
3576 * ip - atime updated
3580 zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
3582 znode_t *zp = ITOZ(ip);
3583 zfs_sb_t *zsb = ITOZSB(ip);
3589 mutex_enter(&zp->z_lock);
3591 error = sa_lookup_uio(zp->z_sa_hdl,
3592 SA_ZPL_SYMLINK(zsb), uio);
3594 error = zfs_sa_readlink(zp, uio);
3595 mutex_exit(&zp->z_lock);
3597 ZFS_ACCESSTIME_STAMP(zsb, zp);
3598 zfs_inode_update(zp);
3602 EXPORT_SYMBOL(zfs_readlink);
3605 * Insert a new entry into directory tdip referencing sip.
3607 * IN: tdip - Directory to contain new entry.
3608 * sip - inode of new entry.
3609 * name - name of new entry.
3610 * cr - credentials of caller.
3612 * RETURN: 0 if success
3613 * error code if failure
3616 * tdip - ctime|mtime updated
3617 * sip - ctime updated
3621 zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr)
3623 znode_t *dzp = ITOZ(tdip);
3625 zfs_sb_t *zsb = ITOZSB(tdip);
3634 ASSERT(S_ISDIR(tdip->i_mode));
3641 * POSIX dictates that we return EPERM here.
3642 * Better choices include ENOTSUP or EISDIR.
3644 if (S_ISDIR(sip->i_mode)) {
3649 if (sip->i_sb != tdip->i_sb) {
3657 /* Prevent links to .zfs/shares files */
3659 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zsb),
3660 &parent, sizeof (uint64_t))) != 0) {
3664 if (parent == zsb->z_shares_dir) {
3669 if (zsb->z_utf8 && u8_validate(name,
3670 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3674 #ifdef HAVE_PN_UTILS
3675 if (flags & FIGNORECASE)
3677 #endif /* HAVE_PN_UTILS */
3680 * We do not support links between attributes and non-attributes
3681 * because of the potential security risk of creating links
3682 * into "normal" file space in order to circumvent restrictions
3683 * imposed in attribute space.
3685 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
3690 owner = zfs_fuid_map_id(zsb, szp->z_uid, cr, ZFS_OWNER);
3691 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
3696 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3703 * Attempt to lock directory; fail if entry already exists.
3705 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
3711 tx = dmu_tx_create(zsb->z_os);
3712 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3713 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3714 zfs_sa_upgrade_txholds(tx, szp);
3715 zfs_sa_upgrade_txholds(tx, dzp);
3716 error = dmu_tx_assign(tx, TXG_NOWAIT);
3718 zfs_dirent_unlock(dl);
3719 if (error == ERESTART) {
3729 error = zfs_link_create(dl, szp, tx, 0);
3732 uint64_t txtype = TX_LINK;
3733 #ifdef HAVE_PN_UTILS
3734 if (flags & FIGNORECASE)
3736 #endif /* HAVE_PN_UTILS */
3737 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
3742 zfs_dirent_unlock(dl);
3744 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3745 zil_commit(zilog, 0);
3747 zfs_inode_update(dzp);
3748 zfs_inode_update(szp);
3752 EXPORT_SYMBOL(zfs_link);
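/*
 * Illustrative sketch, not part of the original file: the structural checks
 * zfs_link() performs before any access control -- both inodes must live on
 * the same ZFS superblock, and both must be on the same side of the
 * attribute/non-attribute (ZFS_XATTR) boundary.  This hypothetical helper
 * only restates the two tests made inline above.
 */
static boolean_t
zfs_link_same_space(struct inode *sip, struct inode *tdip,
    uint64_t src_pflags, uint64_t dst_pflags)
{
	if (sip->i_sb != tdip->i_sb)
		return (B_FALSE);	/* cross-filesystem link */
	if ((src_pflags & ZFS_XATTR) != (dst_pflags & ZFS_XATTR))
		return (B_FALSE);	/* attribute space mismatch */
	return (B_TRUE);
}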
3755 zfs_putpage_commit_cb(void *arg, int error)
3757 struct page *pp = arg;
3760 __set_page_dirty_nobuffers(pp);
3762 if (error != ECANCELED)
3768 end_page_writeback(pp);
3772 * Push a page out to disk, once the page is on stable storage the
3773 * registered commit callback will be run as notification of completion.
3775 * IN: ip - page mapped for inode.
3776 * pp - page to push (page is locked)
3777 * wbc - writeback control data
3779 * RETURN: 0 if success
3780 * error code if failure
3783 * ip - ctime|mtime updated
3787 zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
3789 znode_t *zp = ITOZ(ip);
3790 zfs_sb_t *zsb = ITOZSB(ip);
3798 uint64_t mtime[2], ctime[2];
3799 sa_bulk_attr_t bulk[3];
3806 ASSERT(PageLocked(pp));
3808 pgoff = page_offset(pp); /* Page byte-offset in file */
3809 offset = i_size_read(ip); /* File length in bytes */
3810 pglen = MIN(PAGE_CACHE_SIZE, /* Page length in bytes */
3811 P2ROUNDUP(offset, PAGE_CACHE_SIZE)-pgoff);
3813 /* Page is beyond end of file */
3814 if (pgoff >= offset) {
3820 /* Truncate page length to end of file */
3821 if (pgoff + pglen > offset)
3822 pglen = offset - pgoff;
3826 * FIXME: Allow mmap writes past its quota. The correct fix
3827 * is to register a page_mkwrite() handler to count the page
3828 * against its quota when it is about to be dirtied.
3830 if (zfs_owner_overquota(zsb, zp, B_FALSE) ||
3831 zfs_owner_overquota(zsb, zp, B_TRUE)) {
3836 set_page_writeback(pp);
3839 rl = zfs_range_lock(zp, pgoff, pglen, RL_WRITER);
3840 tx = dmu_tx_create(zsb->z_os);
3842 sync = ((zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) ||
3843 (wbc->sync_mode == WB_SYNC_ALL));
3845 dmu_tx_callback_register(tx, zfs_putpage_commit_cb, pp);
3847 dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
3849 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3850 zfs_sa_upgrade_txholds(tx, zp);
3851 err = dmu_tx_assign(tx, TXG_NOWAIT);
3853 if (err == ERESTART)
3856 /* Will call all registered commit callbacks */
3860 * For the synchronous case the commit callback must be
3861 * explicitly called because there is no registered callback.
3864 zfs_putpage_commit_cb(pp, ECANCELED);
3866 zfs_range_unlock(rl);
3872 ASSERT3U(pglen, <=, PAGE_CACHE_SIZE);
3873 dmu_write(zsb->z_os, zp->z_id, pgoff, pglen, va, tx);
3876 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
3877 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
3878 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zsb), NULL, &zp->z_pflags, 8);
3880 /* Preserve the mtime and ctime provided by the inode */
3881 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
3882 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
3883 zp->z_atime_dirty = 0;
3886 err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
3888 zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0);
3891 zfs_range_unlock(rl);
3894 zil_commit(zsb->z_log, zp->z_id);
3895 zfs_putpage_commit_cb(pp, err);
3903 * Update the system attributes when the inode has been dirtied. For the
3904 * moment we're conservative and only update the atime, mtime, and ctime.
3907 zfs_dirty_inode(struct inode *ip, int flags)
3909 znode_t *zp = ITOZ(ip);
3910 zfs_sb_t *zsb = ITOZSB(ip);
3912 uint64_t atime[2], mtime[2], ctime[2];
3913 sa_bulk_attr_t bulk[3];
3920 tx = dmu_tx_create(zsb->z_os);
3922 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3923 zfs_sa_upgrade_txholds(tx, zp);
3925 error = dmu_tx_assign(tx, TXG_WAIT);
3931 mutex_enter(&zp->z_lock);
3932 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zsb), NULL, &atime, 16);
3933 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
3934 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
3936 /* Preserve the atime, mtime, and ctime provided by the inode */
3937 ZFS_TIME_ENCODE(&ip->i_atime, atime);
3938 ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
3939 ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
3940 zp->z_atime_dirty = 0;
3942 error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
3943 mutex_exit(&zp->z_lock);
3950 EXPORT_SYMBOL(zfs_dirty_inode);
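/*
 * Illustrative outline, not part of the original file: the bulk system
 * attribute update pattern used by zfs_putpage() and zfs_dirty_inode()
 * above.  Attributes are first described with SA_ADD_BULK_ATTR() and then
 * written with a single sa_bulk_update() call inside the assigned
 * transaction:
 *
 *	cnt = 0;
 *	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zsb), NULL, &atime, 16);
 *	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
 *	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
 *	error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
 */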
3954 zfs_inactive(struct inode *ip)
3956 znode_t *zp = ITOZ(ip);
3957 zfs_sb_t *zsb = ITOZSB(ip);
3960 if (zfsctl_is_node(ip)) {
3961 zfsctl_inode_inactive(ip);
3965 rw_enter(&zsb->z_teardown_inactive_lock, RW_READER);
3966 if (zp->z_sa_hdl == NULL) {
3967 rw_exit(&zsb->z_teardown_inactive_lock);
3971 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
3972 dmu_tx_t *tx = dmu_tx_create(zsb->z_os);
3974 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3975 zfs_sa_upgrade_txholds(tx, zp);
3976 error = dmu_tx_assign(tx, TXG_WAIT);
3980 mutex_enter(&zp->z_lock);
3981 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zsb),
3982 (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
3983 zp->z_atime_dirty = 0;
3984 mutex_exit(&zp->z_lock);
3990 rw_exit(&zsb->z_teardown_inactive_lock);
3992 EXPORT_SYMBOL(zfs_inactive);
3995 * Bounds-check the seek operation.
3997 * IN: ip - inode seeking within
3998 * ooff - old file offset
3999 * noffp - pointer to new file offset
4000 * ct - caller context
4002 * RETURN: 0 if success
4003 * EINVAL if new offset invalid
4007 zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
4009 if (S_ISDIR(ip->i_mode))
4011 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4013 EXPORT_SYMBOL(zfs_seek);
4016 * Fill pages with data from the disk.
4019 zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
4021 znode_t *zp = ITOZ(ip);
4022 zfs_sb_t *zsb = ITOZSB(ip);
4024 struct page *cur_pp;
4025 u_offset_t io_off, total;
4032 io_len = nr_pages << PAGE_CACHE_SHIFT;
4033 i_size = i_size_read(ip);
4034 io_off = page_offset(pl[0]);
4036 if (io_off + io_len > i_size)
4037 io_len = i_size - io_off;
4040 * Iterate over list of pages and read each page individually.
4044 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4048 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4052 /* convert checksum errors into IO errors */
4057 cur_pp = pl[++page_idx];
4064 * Uses zfs_fillpage to read data from the file and fill the pages.
4066 * IN: ip - inode of file to get data from.
4067 * pl - list of pages to read
4068 * nr_pages - number of pages to read
4070 * RETURN: 0 if success
4071 * error code if failure
4074 * ip - atime updated
4078 zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
4080 znode_t *zp = ITOZ(ip);
4081 zfs_sb_t *zsb = ITOZSB(ip);
4090 err = zfs_fillpage(ip, pl, nr_pages);
4093 ZFS_ACCESSTIME_STAMP(zsb, zp);
4098 EXPORT_SYMBOL(zfs_getpage);
4101 * Check ZFS specific permissions to memory map a section of a file.
4103 * IN: ip - inode of the file to mmap
4105 * addrp - start address in memory region
4106 * len - length of memory region
4107 * vm_flags - address flags
4109 * RETURN: 0 if success
4110 * error code if failure
4114 zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
4115 unsigned long vm_flags)
4117 znode_t *zp = ITOZ(ip);
4118 zfs_sb_t *zsb = ITOZSB(ip);
4123 if ((vm_flags & VM_WRITE) && (zp->z_pflags &
4124 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4129 if ((vm_flags & (VM_READ | VM_EXEC)) &&
4130 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4135 if (off < 0 || len > MAXOFFSET_T - off) {
4143 EXPORT_SYMBOL(zfs_map);
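/*
 * Illustrative example, not part of the original file: given the checks
 * above, a writable mapping of a file whose znode carries ZFS_IMMUTABLE,
 * ZFS_READONLY, or ZFS_APPENDONLY is refused, as is a readable or executable
 * mapping of a ZFS_AV_QUARANTINED file; only mappings that also pass the
 * MAXOFFSET_T range check reach the generic mmap path.
 */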
4146 * convoff - converts the given data (start, whence) to the
4150 convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
4155 if ((lckdat->l_whence == 2) || (whence == 2)) {
4156 if ((error = zfs_getattr(ip, &vap, 0, CRED()) != 0))
4160 switch (lckdat->l_whence) {
4162 lckdat->l_start += offset;
4165 lckdat->l_start += vap.va_size;
4173 if (lckdat->l_start < 0)
4178 lckdat->l_start -= offset;
4181 lckdat->l_start -= vap.va_size;
4189 lckdat->l_whence = (short)whence;
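/*
 * Illustrative example, not part of the original file: convoff() rewrites a
 * flock64_t so that l_start becomes relative to the requested "whence".  For
 * instance, a lock described with l_whence == 1 (SEEK_CUR) and l_start == 100
 * while the current file offset is 4096 is converted for whence == 0
 * (SEEK_SET) to l_start == 4196; an l_whence of 2 (SEEK_END) is resolved
 * against the file size obtained from zfs_getattr() above.
 */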
4194 * Free or allocate space in a file. Currently, this function only
4195 * supports the `F_FREESP' command. However, this command is somewhat
4196 * misnamed, as its functionality includes the ability to allocate as
4197 * well as free space.
4199 * IN: ip - inode of file to free data in.
4200 * cmd - action to take (only F_FREESP supported).
4201 * bfp - section of file to free/alloc.
4202 * flag - current file open mode flags.
4203 * offset - current file offset.
4204 * cr - credentials of caller [UNUSED].
4206 * RETURN: 0 if success
4207 * error code if failure
4210 * ip - ctime|mtime updated
4214 zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
4215 offset_t offset, cred_t *cr)
4217 znode_t *zp = ITOZ(ip);
4218 zfs_sb_t *zsb = ITOZSB(ip);
4225 if (cmd != F_FREESP) {
4230 if ((error = convoff(ip, bfp, 0, offset))) {
4235 if (bfp->l_len < 0) {
4241 * Permissions aren't checked on Solaris because on this OS
4242 * zfs_space() can only be called with an opened file handle.
4243 * On Linux we can get here through truncate_range() which
4244 * operates directly on inodes, so we need to check access rights.
4246 if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
4252 len = bfp->l_len; /* 0 means from off to end of file */
4254 error = zfs_freesp(zp, off, len, flag, TRUE);
4259 EXPORT_SYMBOL(zfs_space);
4263 zfs_fid(struct inode *ip, fid_t *fidp)
4265 znode_t *zp = ITOZ(ip);
4266 zfs_sb_t *zsb = ITOZSB(ip);
4269 uint64_t object = zp->z_id;
4276 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb),
4277 &gen64, sizeof (uint64_t))) != 0) {
4282 gen = (uint32_t)gen64;
4284 size = (zsb->z_parent != zsb) ? LONG_FID_LEN : SHORT_FID_LEN;
4285 if (fidp->fid_len < size) {
4286 fidp->fid_len = size;
4291 zfid = (zfid_short_t *)fidp;
4293 zfid->zf_len = size;
4295 for (i = 0; i < sizeof (zfid->zf_object); i++)
4296 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4298 /* Must have a non-zero generation number to distinguish from .zfs */
4301 for (i = 0; i < sizeof (zfid->zf_gen); i++)
4302 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4304 if (size == LONG_FID_LEN) {
4305 uint64_t objsetid = dmu_objset_id(zsb->z_os);
4308 zlfid = (zfid_long_t *)fidp;
4310 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
4311 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
4313 /* XXX - this should be the generation number for the objset */
4314 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
4315 zlfid->zf_setgen[i] = 0;
4321 EXPORT_SYMBOL(zfs_fid);
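/*
 * Illustrative sketch, not part of the original file: the fid packing above
 * stores the object id, generation number, and objset id least-significant
 * byte first.  A hypothetical decoder simply reverses the shifts:
 */
static uint64_t
zfs_fid_unpack(const uint8_t *bytes, int nbytes)
{
	uint64_t val = 0;
	int i;

	for (i = 0; i < nbytes; i++)
		val |= (uint64_t)bytes[i] << (8 * i);
	return (val);
}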
4325 zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4327 znode_t *zp = ITOZ(ip);
4328 zfs_sb_t *zsb = ITOZSB(ip);
4330 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4334 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
4339 EXPORT_SYMBOL(zfs_getsecattr);
4343 zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4345 znode_t *zp = ITOZ(ip);
4346 zfs_sb_t *zsb = ITOZSB(ip);
4348 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4349 zilog_t *zilog = zsb->z_log;
4354 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
4356 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
4357 zil_commit(zilog, 0);
4362 EXPORT_SYMBOL(zfs_setsecattr);
4364 #ifdef HAVE_UIO_ZEROCOPY
4366 * Tunables; both must be a power of 2.
4368 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
4369 * zcr_blksz_max: if set to less than the file block size, allow loaning out of
4370 * an arcbuf for a partial block read
4372 int zcr_blksz_min = (1 << 10); /* 1K */
4373 int zcr_blksz_max = (1 << 17); /* 128K */
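/*
 * Illustrative sketch, not part of the original file: how the two tunables
 * above bound the block size used for a zero-copy read in zfs_reqzcbuf()
 * below.  A 512-byte file block is still staged in zcr_blksz_min (1K)
 * buffers, while a 1M block is clamped down to zcr_blksz_max (128K).  This
 * helper is hypothetical and only mirrors the inline clamping code.
 */
static int
zcr_clamp_blksz(int file_blksz)
{
	int blksz = file_blksz;

	if (blksz < zcr_blksz_min)
		blksz = zcr_blksz_min;
	if (blksz > zcr_blksz_max)
		blksz = zcr_blksz_max;
	return (blksz);
}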
4377 zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
4379 znode_t *zp = ITOZ(ip);
4380 zfs_sb_t *zsb = ITOZSB(ip);
4381 int max_blksz = zsb->z_max_blksz;
4382 uio_t *uio = &xuio->xu_uio;
4383 ssize_t size = uio->uio_resid;
4384 offset_t offset = uio->uio_loffset;
4389 int preamble, postamble;
4391 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
4399 * Loan out an arc_buf for write if write size is bigger than
4400 * max_blksz, and the file's block size is also max_blksz.
4403 if (size < blksz || zp->z_blksz != blksz) {
4408 * Caller requests buffers for write before knowing where the
4409 * write offset might be (e.g. NFS TCP write).
4414 preamble = P2PHASE(offset, blksz);
4416 preamble = blksz - preamble;
4421 postamble = P2PHASE(size, blksz);
4424 fullblk = size / blksz;
4425 (void) dmu_xuio_init(xuio,
4426 (preamble != 0) + fullblk + (postamble != 0));
4429 * Have to fix iov base/len for partial buffers. They
4430 * currently represent full arc_buf's.
4433 /* data begins in the middle of the arc_buf */
4434 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4437 (void) dmu_xuio_add(xuio, abuf,
4438 blksz - preamble, preamble);
4441 for (i = 0; i < fullblk; i++) {
4442 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4445 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
4449 /* data ends in the middle of the arc_buf */
4450 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4453 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
4458 * Loan out an arc_buf for read if the read size is larger than
4459 * the current file block size. Block alignment is not
4460 * considered. Partial arc_buf will be loaned out for read.
4462 blksz = zp->z_blksz;
4463 if (blksz < zcr_blksz_min)
4464 blksz = zcr_blksz_min;
4465 if (blksz > zcr_blksz_max)
4466 blksz = zcr_blksz_max;
4467 /* avoid potential complexity of dealing with it */
4468 if (blksz > max_blksz) {
4473 maxsize = zp->z_size - uio->uio_loffset;
4487 uio->uio_extflg = UIO_XUIO;
4488 XUIO_XUZC_RW(xuio) = ioflag;
4495 zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
4499 int ioflag = XUIO_XUZC_RW(xuio);
4501 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
4503 i = dmu_xuio_cnt(xuio);
4505 abuf = dmu_xuio_arcbuf(xuio, i);
4507 * if abuf == NULL, it must be a write buffer
4508 * that has been returned in zfs_write().
4511 dmu_return_arcbuf(abuf);
4512 ASSERT(abuf || ioflag == UIO_WRITE);
4515 dmu_xuio_fini(xuio);
4518 #endif /* HAVE_UIO_ZEROCOPY */
4520 #if defined(_KERNEL) && defined(HAVE_SPL)
4521 module_param(zfs_read_chunk_size, long, 0644);
4522 MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");