/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/taskq.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/sa_impl.h>
/*
 * Programming rules.
 *
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 *  (1) A check must be made in each zfs thread for a mounted file system.
 *	This is done avoiding races using ZFS_ENTER(zfsvfs).
 *	A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *	can return EIO from the calling function.
 *
 *  (2) iput() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT().  This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory.  Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call iput() within a tx, then use zfs_iput_async().
 *
 *  (3) All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
 *	dmu_tx_assign().  This is critical because we don't want to block
 *	while holding locks.
 *
 *	If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT.  This
 *	reduces lock contention and CPU usage when we must wait (note that if
 *	throughput is constrained by the storage, nearly every transaction
 *	must wait).
 *
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing
 *	to use a non-blocking assign can deadlock the system.  The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again.  On subsequent
 *	calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
 *	to indicate that this operation has already called dmu_tx_wait().
 *	This will ensure that we don't retry forever, waiting a short bit
 *	each time.
 *
 *  (5) If the operation succeeded, generate the intent log entry for it
 *	before dropping locks.  This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 *  (6) At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7) After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zfsvfs);		// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may igrab())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		iput(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			waited = B_TRUE;
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zfsvfs);	// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	iput(...);			// release held vnodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	ZFS_EXIT(zfsvfs);		// finished in zfs
 *	return (error);			// done, report error
 */
/*
 * Virus scanning is unsupported.  It would be possible to add a hook
 * here to perform the required virus scan.  This could be done
 * entirely in the kernel or potentially as an update to invoke a
 * scanning utility.
 */
static int
zfs_vscan(struct inode *ip, cred_t *cr, int async)
{
	return (0);
}
int
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/* Honor ZFS_APPENDONLY file attribute */
	if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
	    ((flag & O_APPEND) == 0)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	/* Virus scan eligible files on open */
	if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
		if (zfs_vscan(ip, cr, 0) != 0) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EACCES));
		}
	}

	/* Keep a count of the synchronous opens in the znode */
	if (flag & O_SYNC)
		atomic_inc_32(&zp->z_sync_cnt);

	ZFS_EXIT(zfsvfs);
	return (0);
}
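/*
 * zfs_close() undoes zfs_open(): it drops the synchronous-open count taken
 * above and, when virus scanning is enabled, rescans eligible regular files
 * on close.  (Descriptive note; behavior follows from the code below.)
 */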
int
zfs_close(struct inode *ip, int flag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/* Decrement the synchronous opens in the znode */
	if (flag & O_SYNC)
		atomic_dec_32(&zp->z_sync_cnt);

	if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
		VERIFY(zfs_vscan(ip, cr, 1) == 0);

	ZFS_EXIT(zfsvfs);
	return (0);
}
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA).  "off" is an in/out parameter.
 */
static int
zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
{
	znode_t	*zp = ITOZ(ip);
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (SET_ERROR(ENXIO));
	}

	if (cmd == SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);

	if (error == ESRCH)
		return (SET_ERROR(ENXIO));

	/* file was dirty, so fall back to using generic logic */
	if (error == EBUSY) {
		if (hole)
			*off = file_sz;

		return (0);
	}

	/*
	 * We could find a hole that begins after the logical end-of-file,
	 * because dmu_offset_next() only works on whole blocks.  If the
	 * EOF falls mid-block, then indicate that the "virtual hole"
	 * at the end of the file begins at the logical EOF, rather than
	 * at the end of the last block.
	 */
	if (noff > file_sz) {
		ASSERT(hole);
		noff = file_sz;
	}

	if (noff < *off)
		return (error);
	*off = noff;
	return (error);
}
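/*
 * For illustration (not part of this file): from user space these map to
 * lseek(2), e.g. lseek(fd, 0, SEEK_DATA) to find the first data region and
 * lseek(fd, data_off, SEEK_HOLE) to find where that region ends.
 */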
int
zfs_holey(struct inode *ip, int cmd, loff_t *off)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	error = zfs_holey_common(ip, cmd, off);

	ZFS_EXIT(zfsvfs);
	return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Write:	If we find a memory mapped page, we write to *both*
 *		the page and the dmu buffer.
 */
static void
update_pages(struct inode *ip, int64_t start, int len,
    objset_t *os, uint64_t oid)
{
	struct address_space *mp = ip->i_mapping;
	struct page *pp;
	uint64_t nbytes;
	int64_t	off;
	void *pb;

	off = start & (PAGE_SIZE-1);
	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
		nbytes = MIN(PAGE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_SHIFT);
		if (pp) {
			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			pb = kmap(pp);
			(void) dmu_read(os, oid, start+off, nbytes, pb+off,
			    DMU_READ_PREFETCH);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			SetPageUptodate(pp);
			ClearPageError(pp);
			unlock_page(pp);
			put_page(pp);
		}

		len -= nbytes;
		off = 0;
	}
}
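/*
 * update_pages() is invoked from zfs_write() below after the DMU copy has
 * been updated, so any mapped pages covering the written range are refreshed
 * from the authoritative DMU data.
 */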
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		else we default to the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *	 the file is memory mapped.
 */
static int
mappedread(struct inode *ip, int nbytes, uio_t *uio)
{
	struct address_space *mp = ip->i_mapping;
	struct page *pp;
	znode_t *zp = ITOZ(ip);
	int64_t	start, off;
	uint64_t bytes;
	int len = nbytes;
	int error = 0;
	void *pb;

	start = uio->uio_loffset;
	off = start & (PAGE_SIZE-1);
	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
		bytes = MIN(PAGE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_SHIFT);
		if (pp) {
			ASSERT(PageUptodate(pp));
			unlock_page(pp);

			pb = kmap(pp);
			error = uiomove(pb + off, bytes, UIO_READ, uio);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			put_page(pp);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, bytes);
		}

		len -= bytes;
		off = 0;
		if (error)
			break;
	}
	return (error);
}
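/*
 * Tunable notes: zfs_read_chunk_size bounds how much zfs_read() moves per
 * loop iteration; zfs_delete_blocks (consulted by zfs_remove() below) bounds
 * how large a file may be and still have its blocks freed synchronously in
 * the unlink transaction.
 */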
unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	ip	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- FSYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
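/*
 * Illustrative call sequence (hypothetical caller; the uio is normally
 * constructed by the zpl_* layer from the user's buffer and file position):
 *
 *	uio.uio_loffset = *ppos;
 *	uio.uio_resid = len;
 *	error = zfs_read(ip, &uio, ioflag, cr);
 */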
int
zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0;

	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EACCES));
	}

	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 * Only do this for non-snapshots.
	 */
	if (zfsvfs->z_log &&
	    (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
		zil_commit(zfsvfs->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
	    uio->uio_loffset, uio->uio_resid, RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(uio->uio_loffset < zp->z_size);
	ssize_t n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
	ssize_t start_resid = n;

#ifdef HAVE_UIO_ZEROCOPY
	xuio_t *xuio = NULL;
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
		int nblk;
		int blksz = zp->z_blksz;
		uint64_t offset = uio->uio_loffset;

		xuio = (xuio_t *)uio;
		if ((ISP2(blksz))) {
			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
			    blksz)) / blksz;
		} else {
			ASSERT(offset + n <= blksz);
			nblk = 1;
		}
		(void) dmu_xuio_init(xuio, nblk);

		if (vn_has_cached_data(ip)) {
			/*
			 * For simplicity, we always allocate a full buffer
			 * even if we only expect to read a portion of a block.
			 */
			while (--nblk >= 0) {
				(void) dmu_xuio_add(xuio,
				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
				    blksz), 0, blksz);
			}
		}
	}
#endif /* HAVE_UIO_ZEROCOPY */

	while (n > 0) {
		ssize_t nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

		if (zp->z_is_mapped && !(ioflag & O_DIRECT)) {
			error = mappedread(ip, nbytes, uio);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes);
		}

		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}

		n -= nbytes;
	}

	int64_t nread = start_resid - n;
	dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
	task_io_account_read(nread);

out:
	rangelock_exit(lr);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Write the bytes to a file.
 *
 *	IN:	ip	- inode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- FAPPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */
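/*
 * Note: the implementation below splits a large write into chunks of at
 * most z_max_blksz bytes, each carried by its own DMU transaction, so the
 * intent log sees a sequence of small records rather than one huge one;
 * see the chunking comment at the main loop.
 */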
int
zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0;
	ssize_t start_resid = uio->uio_resid;

	/*
	 * Fasttrack empty write
	 */
	ssize_t n = start_resid;
	if (n == 0)
		return (0);

	rlim64_t limit = uio->uio_limit;
	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;

	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	sa_bulk_attr_t bulk[4];
	int count = 0;
	uint64_t mtime[2], ctime[2];
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);

	/*
	 * Callers might not be able to detect properly that we are read-only,
	 * so check it explicitly here.
	 */
	if (zfs_is_readonly(zfsvfs)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EROFS));
	}

	/*
	 * If immutable or not appending then return EPERM
	 */
	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
	    (uio->uio_loffset < zp->z_size))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	/*
	 * Validate file offset
	 */
	offset_t woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
	if (woff < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	int max_blksz = zfsvfs->z_max_blksz;
	xuio_t *xuio = NULL;

	/*
	 * Pre-fault the pages to ensure slow (eg NFS) pages
	 * don't hold up txg.
	 * Skip this if uio contains loaned arc_buf.
	 */
#ifdef HAVE_UIO_ZEROCOPY
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
		xuio = (xuio_t *)uio;
	else
#endif
	if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFAULT));
	}

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	locked_range_t *lr;
	if (ioflag & FAPPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics.  We reset the write offset once we have the lock.
		 */
		lr = rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
		woff = lr->lr_offset;
		if (lr->lr_length == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		uio->uio_loffset = woff;
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		lr = rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
	}

	if (woff >= limit) {
		rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFBIG));
	}

	if ((woff + n) > limit || woff > (limit - n))
		n = limit - woff;

	/* Will this write extend the file length? */
	int write_eof = (woff + n > zp->z_size);

	uint64_t end_size = MAX(zp->z_size, woff + n);
	zilog_t	*zilog = zfsvfs->z_log;
#ifdef HAVE_UIO_ZEROCOPY
	int		i_iov = 0;
	const iovec_t	*iovp = uio->uio_iov;
	ASSERTV(int	iovcnt = uio->uio_iovcnt);
#endif

	/*
	 * Write the file in reasonable size chunks.  Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (n > 0) {
		woff = uio->uio_loffset;

		if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
		    KUID_TO_SUID(ip->i_uid)) ||
		    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
		    KGID_TO_SGID(ip->i_gid)) ||
		    (zp->z_projid != ZFS_DEFAULT_PROJID &&
		    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
		    zp->z_projid))) {
			error = SET_ERROR(EDQUOT);
			break;
		}

		arc_buf_t *abuf = NULL;
		const iovec_t *aiov = NULL;
		if (xuio) {
#ifdef HAVE_UIO_ZEROCOPY
			ASSERT(i_iov < iovcnt);
			ASSERT3U(uio->uio_segflg, !=, UIO_BVEC);
			aiov = &iovp[i_iov];
			abuf = dmu_xuio_arcbuf(xuio, i_iov);
			dmu_xuio_clear(xuio, i_iov);
			ASSERT((aiov->iov_base == abuf->b_data) ||
			    ((char *)aiov->iov_base - (char *)abuf->b_data +
			    aiov->iov_len == arc_buf_size(abuf)));
			i_iov++;
#endif
		} else if (n >= max_blksz && woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block.  "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction.  This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			size_t cbytes;

			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if ((error = uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes))) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT(cbytes == max_blksz);
		}

		/*
		 * Start a transaction.
		 */
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * If rangelock_enter() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since rangelock_reduce() will
		 * shrink down lr_length to the appropriate size.
		 */
		if (lr->lr_length == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				/*
				 * File's blocksize is already larger than the
				 * "recordsize" property.  Only let it grow to
				 * the next power of 2.
				 */
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size,
				    1 << highbit64(zp->z_blksz));
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			rangelock_reduce(lr, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		ssize_t nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		ssize_t tx_bytes;
		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			uio->uio_fault_disable = B_TRUE;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			uio->uio_fault_disable = B_FALSE;
			if (error == EFAULT) {
				dmu_tx_commit(tx);
				if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
					break;
				}
				continue;
			} else if (error != 0) {
				dmu_tx_commit(tx);
				break;
			}
			tx_bytes -= uio->uio_resid;
		} else {
			tx_bytes = nbytes;
			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf().  Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {
				ASSERT(xuio);
				dmu_write(zfsvfs->z_os, zp->z_id, woff,
				    /* cppcheck-suppress nullPointer */
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();
			} else {
				ASSERT(xuio || tx_bytes == max_blksz);
				dmu_assign_arcbuf_by_dbuf(
				    sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
			}
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);
		}
		if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) {
			update_pages(ip, woff,
			    tx_bytes, zfsvfs->z_os, zp->z_id);
		}

		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
		uint32_t uid = KUID_TO_SUID(ip->i_uid);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(cr,
		    ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
			uint64_t newmode;

			zp->z_mode &= ~(S_ISUID | S_ISGID);
			ip->i_mode = newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    uio->uio_loffset);
			ASSERT(error == 0);
		}
		/*
		 * If we are replaying and eof is non zero then force
		 * the file size to the specified eof.  Note, there's no
		 * concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;

		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
		    NULL, NULL);
		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT(tx_bytes == nbytes);
		n -= nbytes;

		if (!xuio && n > 0) {
			if (uio_prefaultpages(MIN(n, max_blksz), uio)) {
				error = EFAULT;
				break;
			}
		}
	}

	zfs_inode_update(zp);
	rangelock_exit(lr);

	/*
	 * If we're in replay mode, or we made no progress, return error.
	 * Otherwise, it's at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (ioflag & (FSYNC | FDSYNC) ||
	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	int64_t nwritten = start_resid - uio->uio_resid;
	dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
	task_io_account_write(nwritten);

	ZFS_EXIT(zfsvfs);
	return (0);
}
/*
 * Drop a reference on the passed inode asynchronously.  This ensures
 * that the caller will never drop the last reference on an inode in
 * the current context.  Doing so while holding open a tx could result
 * in a deadlock if iput_final() re-enters the filesystem code.
 */
void
zfs_iput_async(struct inode *ip)
{
	objset_t *os = ITOZSB(ip)->z_os;

	ASSERT(atomic_read(&ip->i_count) > 0);
	ASSERT(os != NULL);

	if (atomic_read(&ip->i_count) == 1)
		VERIFY(taskq_dispatch(dsl_pool_iput_taskq(dmu_objset_pool(os)),
		    (task_func_t *)iput, ip, TQ_SLEEP) != TASKQID_INVALID);
	else
		iput(ip);
}
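/*
 * zfs_get_done() below, and the unlinked-set paths in zfs_remove(), rely on
 * zfs_iput_async() precisely because they run with a transaction or txg held
 * open, where a final iput could block or deadlock.
 */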
static void
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	rangelock_exit(zgd->zgd_lr);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	zfs_iput_async(ZTOI(zp));

	if (error == 0 && zgd->zgd_bp)
		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}
#ifdef DEBUG
static int zil_fault_io = 0;
#endif
/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
{
	zfsvfs_t *zfsvfs = arg;
	objset_t *os = zfsvfs->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;

	ASSERT3P(lwb, !=, NULL);
	ASSERT3P(zio, !=, NULL);
	ASSERT3U(size, !=, 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zfsvfs, object, &zp) != 0)
		return (SET_ERROR(ENOENT));
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		zfs_iput_async(ZTOI(zp));
		return (SET_ERROR(ENOENT));
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_lwb = lwb;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
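	/*
	 * Concretely, in the code below: immediate writes (buf != NULL) copy
	 * the file data into the log record via dmu_read(), while indirect
	 * writes dmu_sync() the block and record a block pointer instead.
	 */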
	if (buf != NULL) { /* immediate write */
		zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
		    offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = SET_ERROR(ENOENT);
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
			ASSERT(error == 0 || error == ENOENT);
		}
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data.  We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		for (;;) {
			uint64_t blkoff;

			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
			    offset, size, RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			rangelock_exit(zgd->zgd_lr);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = SET_ERROR(ENOENT);
#ifdef DEBUG
		if (zil_fault_io) {
			error = SET_ERROR(EIO);
			zil_fault_io = 0;
		}
#endif
		if (error == 0)
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			blkptr_t *bp = &lr->lr_blkptr;

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= size);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf.  We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == 0)
				return (0);

			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;
				/*
				 * TX_WRITE2 relies on the data previously
				 * written by the TX_WRITE that caused
				 * EALREADY.  We zero out the BP because
				 * it is the old, currently-on-disk BP,
				 * so there's no need to zio_flush() its
				 * vdevs (flushing would needlessly hurt
				 * performance, and doesn't work on
				 * indirect vdevs).
				 */
				zgd->zgd_bp = NULL;
				BP_ZERO(bp);
				error = 0;
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}
int
zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held inode reference for it.
 *
 *	IN:	dip	- inode of directory to search.
 *		nm	- name of entry to lookup.
 *		flags	- LOOKUP_XATTR set if looking for an attribute.
 *		cr	- credentials of caller.
 *		direntflags - directory lookup flags
 *		realpnp - returned pathname.
 *
 *	OUT:	ipp	- inode of located entry, NULL if not found.
 *
 *	RETURN:	0 on success, error code on failure.
 */
int
zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
    cred_t *cr, int *direntflags, pathname_t *realpnp)
{
	znode_t *zdp = ITOZ(dip);
	zfsvfs_t *zfsvfs = ITOZSB(dip);
	int error = 0;

	/*
	 * Fast path lookup, however we must skip DNLC lookup
	 * for case folding or normalizing lookups because the
	 * DNLC code only stores the passed in name.  This means
	 * creating 'a' and removing 'A' on a case insensitive
	 * file system would work, but DNLC still thinks 'a'
	 * exists and won't let you create it again on the next
	 * pass through fast path.
	 */
	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {

		if (!S_ISDIR(dip->i_mode)) {
			return (SET_ERROR(ENOTDIR));
		} else if (zdp->z_sa_hdl == NULL) {
			return (SET_ERROR(EIO));
		}

		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
			error = zfs_fastaccesschk_execute(zdp, cr);
			if (!error) {
				*ipp = dip;
				igrab(*ipp);
				return (0);
			}
			return (error);
#ifdef HAVE_DNLC
		} else if (!zdp->z_zfsvfs->z_norm &&
		    (zdp->z_zfsvfs->z_case == ZFS_CASE_SENSITIVE)) {

			vnode_t *tvp = dnlc_lookup(dvp, nm);

			if (tvp) {
				error = zfs_fastaccesschk_execute(zdp, cr);
				if (error) {
					iput(tvp);
					return (error);
				}
				if (tvp == DNLC_NO_VNODE) {
					iput(tvp);
					return (SET_ERROR(ENOENT));
				} else {
					*vpp = tvp;
					return (specvp_check(vpp, cr));
				}
			}
#endif /* HAVE_DNLC */
		}
	}

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zdp);

	*ipp = NULL;

	if (flags & LOOKUP_XATTR) {
		/*
		 * We don't allow recursive attributes..
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EINVAL));
		}

		if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}

		/*
		 * Do we have permission to get into attribute directory?
		 */
		if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
		    B_FALSE, cr))) {
			iput(*ipp);
			*ipp = NULL;
		}

		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (!S_ISDIR(dip->i_mode)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOTDIR));
	}

	/*
	 * Check accessibility of directory.
	 */
	if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}

	error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
	if ((error == 0) && (*ipp))
		zfs_inode_update(ITOZ(*ipp));

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Attempt to create a new entry in a directory.  If the entry
 * already exists, truncate the file if permissible, else return
 * an error.  Return the ip of the created or trunc'd file.
 *
 *	IN:	dip	- inode of directory to put new file entry in.
 *		name	- name of new file entry.
 *		vap	- attributes of new file.
 *		excl	- flag indicating exclusive or non-exclusive mode.
 *		mode	- mode to open file with.
 *		cr	- credentials of caller.
 *		flag	- large file flag [UNUSED].
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created or trunc'd entry.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dip - ctime|mtime updated if new entry created
 *	 ip - ctime|mtime always, atime if new
 */
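/*
 * Note for callers: a non-zero 'excl' corresponds to O_CREAT|O_EXCL
 * semantics; if the name already exists, the operation fails with EEXIST
 * rather than truncating the existing file (see the existing-entry branch
 * below).
 */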
int
zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
	znode_t	*zp, *dzp = ITOZ(dip);
	zfsvfs_t *zfsvfs = ITOZSB(dip);
	zilog_t	*zilog;
	objset_t *os;
	zfs_dirlock_t *dl;
	dmu_tx_t *tx;
	int error;
	uid_t uid;
	gid_t gid;
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
	boolean_t have_acl = B_FALSE;
	boolean_t waited = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	gid = crgetgid(cr);
	uid = crgetuid(cr);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	os = zfsvfs->z_os;
	zilog = zfsvfs->z_log;

	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

top:
	*ipp = NULL;
	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */
		igrab(dip);
		zp = dzp;
		dl = NULL;
		error = 0;
	} else {
		/* possible igrab(zp) */
		int zflg = 0;

		if (flag & FIGNORECASE)
			zflg |= ZCILOOK;

		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
		    NULL, NULL);
		if (error) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			if (strcmp(name, "..") == 0)
				error = SET_ERROR(EISDIR);
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	if (zp == NULL) {
		uint64_t txtype;
		uint64_t projid = ZFS_DEFAULT_PROJID;

		/*
		 * Create a new file object and update the directory
		 * to reference it.
		 */
		if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			goto out;
		}

		/*
		 * We only support the creation of regular files in
		 * extended attribute directories.
		 */
		if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EINVAL);
			goto out;
		}

		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
		    cr, vsecp, &acl_ids)) != 0)
			goto out;
		have_acl = B_TRUE;

		if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
			projid = zfs_inherit_projid(dzp);
		if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
			zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EDQUOT);
			goto out;
		}

		tx = dmu_tx_create(os);

		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
		    ZFS_SA_BASE_ATTR_SIZE);

		fuid_dirtied = zfsvfs->z_fuid_dirty;
		if (fuid_dirtied)
			zfs_fuid_txhold(zfsvfs, tx);
		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		if (!zfsvfs->z_use_sa &&
		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, acl_ids.z_aclp->z_acl_bytes);
		}

		error = dmu_tx_assign(tx,
		    (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
		if (error) {
			zfs_dirent_unlock(dl);
			if (error == ERESTART) {
				waited = B_TRUE;
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_abort(tx);
			ZFS_EXIT(zfsvfs);
			return (error);
		}
		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

		error = zfs_link_create(dl, zp, tx, ZNEW);
		if (error != 0) {
			/*
			 * Since we failed to add the directory entry,
			 * delete the newly created dnode.
			 */
			zfs_znode_delete(zp, tx);
			remove_inode_hash(ZTOI(zp));
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_commit(tx);
			goto out;
		}

		if (fuid_dirtied)
			zfs_fuid_sync(zfsvfs, tx);

		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
		if (flag & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
		    vsecp, acl_ids.z_fuidp, vap);
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_commit(tx);
	} else {
		int aflags = (flag & FAPPEND) ? V_APPEND : 0;

		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		have_acl = B_FALSE;

		/*
		 * A directory entry already exists for this name.
		 */
		/*
		 * Can't truncate an existing file if in exclusive mode.
		 */
		if (excl) {
			error = SET_ERROR(EEXIST);
			goto out;
		}
		/*
		 * Can't open a directory for writing.
		 */
		if (S_ISDIR(ZTOI(zp)->i_mode)) {
			error = SET_ERROR(EISDIR);
			goto out;
		}
		/*
		 * Verify requested access to file.
		 */
		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
			goto out;
		}

		mutex_enter(&dzp->z_lock);
		dzp->z_seq++;
		mutex_exit(&dzp->z_lock);

		/*
		 * Truncate regular files if requested.
		 */
		if (S_ISREG(ZTOI(zp)->i_mode) &&
		    (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
			/* we can't hold any locks when calling zfs_freesp() */
			if (dl) {
				zfs_dirent_unlock(dl);
				dl = NULL;
			}
			error = zfs_freesp(zp, 0, 0, mode, TRUE);
		}
	}
out:

	if (dl)
		zfs_dirent_unlock(dl);

	if (error) {
		if (zp)
			iput(ZTOI(zp));
	} else {
		zfs_inode_update(dzp);
		zfs_inode_update(zp);
		*ipp = ZTOI(zp);
	}

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
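/*
 * zfs_tmpfile() backs O_TMPFILE-style creation: like zfs_create(), but the
 * new object is born on the unlinked set and never receives a directory
 * entry.
 */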
int
zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
	znode_t	*zp = NULL, *dzp = ITOZ(dip);
	zfsvfs_t *zfsvfs = ITOZSB(dip);
	objset_t *os;
	dmu_tx_t *tx;
	int error;
	uid_t uid;
	gid_t gid;
	zfs_acl_ids_t acl_ids;
	uint64_t projid = ZFS_DEFAULT_PROJID;
	boolean_t fuid_dirtied;
	boolean_t have_acl = B_FALSE;
	boolean_t waited = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	gid = crgetgid(cr);
	uid = crgetuid(cr);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	os = zfsvfs->z_os;

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

top:
	*ipp = NULL;

	/*
	 * Create a new file object and update the directory
	 * to reference it.
	 */
	if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		goto out;
	}

	if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
	    cr, vsecp, &acl_ids)) != 0)
		goto out;
	have_acl = B_TRUE;

	if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
		projid = zfs_inherit_projid(dzp);
	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
		zfs_acl_ids_free(&acl_ids);
		error = SET_ERROR(EDQUOT);
		goto out;
	}

	tx = dmu_tx_create(os);

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	if (!zfsvfs->z_use_sa &&
	    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
		    0, acl_ids.z_aclp->z_acl_bytes);
	}
	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	/* Add to unlinked set */
	zp->z_unlinked = 1;
	zfs_unlinked_add(zp, tx);
	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);
out:

	if (error) {
		if (zp)
			iput(ZTOI(zp));
	} else {
		zfs_inode_update(dzp);
		zfs_inode_update(zp);
		*ipp = ZTOI(zp);
	}

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Remove an entry from a directory.
 *
 *	IN:	dip	- inode of directory to remove entry from.
 *		name	- name of entry to remove.
 *		cr	- credentials of caller.
 *		flags	- case flags.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime
 *	 ip - ctime (if nlink > 0)
 */

uint64_t null_xattr = 0;
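/*
 * zfs_remove() either frees the object in the same transaction that removes
 * the directory entry (delete_now) or defers destruction to the unlinked
 * set, depending on outstanding holds, mappings, and object size; see the
 * may_delete_now/delete_now logic below.
 */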
int
zfs_remove(struct inode *dip, char *name, cred_t *cr, int flags)
{
	znode_t	*zp, *dzp = ITOZ(dip);
	znode_t	*xzp;
	struct inode *ip;
	zfsvfs_t *zfsvfs = ITOZSB(dip);
	zilog_t	*zilog;
	uint64_t acl_obj, xattr_obj;
	uint64_t xattr_obj_unlinked = 0;
	uint64_t obj = 0;
	uint64_t links;
	zfs_dirlock_t *dl;
	dmu_tx_t *tx;
	boolean_t may_delete_now, delete_now = FALSE;
	boolean_t unlinked, toobig = FALSE;
	uint64_t txtype;
	pathname_t *realnmp = NULL;
	pathname_t realnm;
	int error;
	int zflg = ZEXISTS;
	boolean_t waited = B_FALSE;

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (flags & FIGNORECASE) {
		zflg |= ZCILOOK;
		pn_alloc(&realnm);
		realnmp = &realnm;
	}

top:
	xattr_obj = 0;
	xzp = NULL;
	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, realnmp))) {
		if (realnmp)
			pn_free(realnmp);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	ip = ZTOI(zp);

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
		goto out;
	}

	/*
	 * Need to use rmdir for removing directories.
	 */
	if (S_ISDIR(ip->i_mode)) {
		error = SET_ERROR(EPERM);
		goto out;
	}

#ifdef HAVE_DNLC
	if (realnmp)
		dnlc_remove(dvp, realnmp->pn_buf);
	else
		dnlc_remove(dvp, name);
#endif /* HAVE_DNLC */

	mutex_enter(&zp->z_lock);
	may_delete_now = atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped);
	mutex_exit(&zp->z_lock);

	/*
	 * We may delete the znode now, or we may put it in the unlinked set;
	 * it depends on whether we're the last link, and on whether there are
	 * other holds on the inode.  So we dmu_tx_hold() the right things to
	 * allow for either case.
	 */
	obj = zp->z_id;
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	if (may_delete_now) {
		toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
		/* if the file is too big, only hold_free a token amount */
		dmu_tx_hold_free(tx, zp->z_id, 0,
		    (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
	}

	/* are there any extended attributes? */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
		ASSERT0(error);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}

	mutex_enter(&zp->z_lock);
	if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
	mutex_exit(&zp->z_lock);

	/* charge as an update -- would be nice not to charge at all */
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	/*
	 * Mark this transaction as typically resulting in a net free of space
	 */
	dmu_tx_mark_netfree(tx);

	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			iput(ip);
			if (xzp)
				iput(ZTOI(xzp));
			goto top;
		}
		if (realnmp)
			pn_free(realnmp);
		dmu_tx_abort(tx);
		iput(ip);
		if (xzp)
			iput(ZTOI(xzp));
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * Remove the directory entry.
	 */
	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
	if (error) {
		dmu_tx_commit(tx);
		goto out;
	}

	if (unlinked) {
		/*
		 * Hold z_lock so that we can make sure that the ACL obj
		 * hasn't changed.  Could have been deleted due to
		 * zfs_sa_upgrade().
		 */
		mutex_enter(&zp->z_lock);
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
		delete_now = may_delete_now && !toobig &&
		    atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped) &&
		    xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
		    acl_obj;
	}

	if (delete_now) {
		if (xattr_obj_unlinked) {
			ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
			mutex_enter(&xzp->z_lock);
			xzp->z_unlinked = 1;
			clear_nlink(ZTOI(xzp));
			links = 0;
			error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
			    &links, sizeof (links), tx);
			ASSERT3U(error, ==, 0);
			mutex_exit(&xzp->z_lock);
			zfs_unlinked_add(xzp, tx);

			if (zp->z_is_sa)
				error = sa_remove(zp->z_sa_hdl,
				    SA_ZPL_XATTR(zfsvfs), tx);
			else
				error = sa_update(zp->z_sa_hdl,
				    SA_ZPL_XATTR(zfsvfs), &null_xattr,
				    sizeof (uint64_t), tx);
			ASSERT0(error);
		}
		/*
		 * Add to the unlinked set because a new reference could be
		 * taken concurrently resulting in a deferred destruction.
		 */
		zfs_unlinked_add(zp, tx);
		mutex_exit(&zp->z_lock);
	} else if (unlinked) {
		mutex_exit(&zp->z_lock);
		zfs_unlinked_add(zp, tx);
	}

	txtype = TX_REMOVE;
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);

	dmu_tx_commit(tx);
out:
	if (realnmp)
		pn_free(realnmp);

	zfs_dirent_unlock(dl);
	zfs_inode_update(dzp);
	zfs_inode_update(zp);

	if (delete_now)
		iput(ip);
	else
		zfs_iput_async(ip);

	if (xzp) {
		zfs_inode_update(xzp);
		zfs_iput_async(ZTOI(xzp));
	}

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Create a new directory and insert it into dip using the name
 * provided.  Return a pointer to the inserted directory.
 *
 *	IN:	dip	- inode of directory to add subdir to.
 *		dirname	- name of new directory.
 *		vap	- attributes of new directory.
 *		cr	- credentials of caller.
 *		vsecp	- ACL to be set
 *
 *	OUT:	ipp	- inode of created directory.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dip - ctime|mtime updated
 *	ipp - ctime|mtime|atime updated
 */
int
zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
    cred_t *cr, int flags, vsecattr_t *vsecp)
{
	znode_t	*zp, *dzp = ITOZ(dip);
	zfsvfs_t *zfsvfs = ITOZSB(dip);
	zilog_t	*zilog;
	zfs_dirlock_t *dl;
	uint64_t txtype;
	dmu_tx_t *tx;
	int error;
	int zf = ZNEW;
	uid_t uid;
	gid_t gid = crgetgid(cr);
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
	boolean_t waited = B_FALSE;

	ASSERT(S_ISDIR(vap->va_mode));

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	uid = crgetuid(cr);
	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	if (dirname == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (dzp->z_pflags & ZFS_XATTR) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	if (zfsvfs->z_utf8 && u8_validate(dirname,
	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}
	if (flags & FIGNORECASE)
		zf |= ZCILOOK;

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
	    vsecp, &acl_ids)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	/*
	 * First make sure the new directory doesn't exist.
	 *
	 * Existence is checked first to make sure we don't return
	 * EACCES instead of EEXIST which can cause some applications
	 * to fail.
	 */
top:
	*ipp = NULL;

	if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
	    NULL, NULL))) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EDQUOT));
	}

	/*
	 * Add a new entry to the directory.
	 */
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * Create new node.
	 */
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	/*
	 * Now put new name in parent dir.
	 */
	error = zfs_link_create(dl, zp, tx, ZNEW);
	if (error != 0) {
		zfs_znode_delete(zp, tx);
		remove_inode_hash(ZTOI(zp));
		goto out;
	}

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	*ipp = ZTOI(zp);

	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
	    acl_ids.z_fuidp, vap);

out:
	zfs_acl_ids_free(&acl_ids);

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	if (error != 0) {
		iput(ZTOI(zp));
	} else {
		zfs_inode_update(dzp);
		zfs_inode_update(zp);
	}
	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Remove a directory subdir entry.  If the current working
 * directory is the same as the subdir to be removed, the
 * remove will fail.
 *
 *	IN:	dip	- inode of directory to remove from.
 *		name	- name of directory to be removed.
 *		cwd	- inode of current working directory.
 *		cr	- credentials of caller.
 *		flags	- case flags
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dip - ctime|mtime updated
 */
int
zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
    int flags)
{
	znode_t	*dzp = ITOZ(dip);
	znode_t	*zp;
	struct inode *ip;
	zfsvfs_t *zfsvfs = ITOZSB(dip);
	zilog_t	*zilog;
	zfs_dirlock_t *dl;
	dmu_tx_t *tx;
	int error;
	int zflg = ZEXISTS;
	boolean_t waited = B_FALSE;

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;
top:
	zp = NULL;

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, NULL))) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	ip = ZTOI(zp);

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
		goto out;
	}

	if (!S_ISDIR(ip->i_mode)) {
		error = SET_ERROR(ENOTDIR);
		goto out;
	}

	if (ip == cwd) {
		error = SET_ERROR(EINVAL);
		goto out;
	}

	/*
	 * Grab a lock on the directory to make sure that no one is
	 * trying to add (or lookup) entries while we are removing it.
	 */
	rw_enter(&zp->z_name_lock, RW_WRITER);

	/*
	 * Grab a lock on the parent pointer to make sure we play well
	 * with the treewalk and directory rename code.
	 */
	rw_enter(&zp->z_parent_lock, RW_WRITER);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		rw_exit(&zp->z_parent_lock);
		rw_exit(&zp->z_name_lock);
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			iput(ip);
			goto top;
		}
		dmu_tx_abort(tx);
		iput(ip);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);

	if (error == 0) {
		uint64_t txtype = TX_RMDIR;
		if (flags & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
	}

	dmu_tx_commit(tx);

	rw_exit(&zp->z_parent_lock);
	rw_exit(&zp->z_name_lock);
out:
	zfs_dirent_unlock(dl);

	zfs_inode_update(dzp);
	zfs_inode_update(zp);
	iput(ip);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Read as many directory entries as will fit into the provided
 * dirent buffer from the given directory cursor position.
 *
 *	IN:	ip	- inode of directory to read.
 *		dirent	- buffer for directory entries.
 *
 *	OUT:	dirent	- filler buffer of directory entries.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - atime updated
 *
 * Note that the low 4 bits of the cookie returned by the ZAP are always
 * zero.  This allows us to use the low range for "special" directory
 * entries: we use 0 for '.', and 1 for '..'.  If this is the root of the
 * filesystem, we use the offset 2 for the '.zfs' directory.
 */
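/*
 * Consequently, any non-zero serialized ZAP cookie has its low 4 bits
 * clear, so it is at least 16 and can never collide with the special
 * offsets 0, 1, and 2 reserved above.
 */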
int
zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	objset_t *os;
	zap_cursor_t zc;
	zap_attribute_t	zap;
	int error;
	uint8_t prefetch;
	uint8_t type;
	int done = 0;
	uint64_t parent;
	uint64_t offset; /* must be unsigned; checks for < 1 */

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent))) != 0)
		goto out;

	/*
	 * Quit if directory has been removed (posix)
	 */
	if (zp->z_unlinked)
		goto out;

	error = 0;
	os = zfsvfs->z_os;
	offset = ctx->pos;
	prefetch = zp->z_zn_prefetch;

	/*
	 * Initialize the iterator cursor.
	 */
	if (offset <= 3) {
		/*
		 * Start iteration from the beginning of the directory.
		 */
		zap_cursor_init(&zc, os, zp->z_id);
	} else {
		/*
		 * The offset is a serialized cursor.
		 */
		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
	}

	/*
	 * Transform to file-system independent format
	 */
	while (!done) {
		uint64_t objnum;
		/*
		 * Special case `.', `..', and `.zfs'.
		 */
		if (offset == 0) {
			(void) strcpy(zap.za_name, ".");
			zap.za_normalization_conflict = 0;
			objnum = zp->z_id;
			type = DT_DIR;
		} else if (offset == 1) {
			(void) strcpy(zap.za_name, "..");
			zap.za_normalization_conflict = 0;
			objnum = parent;
			type = DT_DIR;
		} else if (offset == 2 && zfs_show_ctldir(zp)) {
			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
			zap.za_normalization_conflict = 0;
			objnum = ZFSCTL_INO_ROOT;
			type = DT_DIR;
		} else {
			/*
			 * Grab next entry.
			 */
			if ((error = zap_cursor_retrieve(&zc, &zap))) {
				if (error == ENOENT)
					break;
				else
					goto update;
			}

			/*
			 * Allow multiple entries provided the first entry is
			 * the object id.  Non-zpl consumers may safely make
			 * use of the additional space.
			 *
			 * XXX: This should be a feature flag for compatibility
			 */
			if (zap.za_integer_length != 8 ||
			    zap.za_num_integers == 0) {
				cmn_err(CE_WARN, "zap_readdir: bad directory "
				    "entry, obj = %lld, offset = %lld, "
				    "length = %d, num = %lld\n",
				    (u_longlong_t)zp->z_id,
				    (u_longlong_t)offset,
				    zap.za_integer_length,
				    (u_longlong_t)zap.za_num_integers);
				error = SET_ERROR(ENXIO);
				goto update;
			}

			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
			type = ZFS_DIRENT_TYPE(zap.za_first_integer);
		}

		done = !zpl_dir_emit(ctx, zap.za_name, strlen(zap.za_name),
		    objnum, type);
		if (done)
			break;

		/* Prefetch znode */
		if (prefetch) {
			dmu_prefetch(os, objnum, 0, 0, 0,
			    ZIO_PRIORITY_SYNC_READ);
		}

		/*
		 * Move to the next entry, fill in the previous offset.
		 */
		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
			zap_cursor_advance(&zc);
			offset = zap_cursor_serialize(&zc);
		} else {
			offset += 1;
		}
		ctx->pos = offset;
	}
	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */

update:
	zap_cursor_fini(&zc);
	if (error == ENOENT)
		error = 0;
out:
	ZFS_EXIT(zfsvfs);

	return (error);
}
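/*
 * zfs_fsync() forces the ZIL to commit any outstanding transactions for the
 * file; zfs_fsyncer_key records the in-progress fsync in thread-specific
 * data so other parts of ZFS can observe it.
 */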
ulong_t zfs_fsync_sync_cnt = 4;

int
zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);

	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		zil_commit(zfsvfs->z_log, zp->z_id);
		ZFS_EXIT(zfsvfs);
	}
	tsd_set(zfs_fsyncer_key, NULL);

	return (0);
}
/*
 * Get the requested file attributes and place them in the provided
 * vattr structure.
 *
 *	IN:	ip	- inode of file.
 *		vap	- va_mask identifies requested attributes.
 *			  If ATTR_XVATTR set, then optional attrs are requested
 *		flags	- ATTR_NOACLCHECK (CIFS server context)
 *		cr	- credentials of caller.
 *
 *	OUT:	vap	- attribute values.
 *
 *	RETURN:	0 (always succeeds)
 */
int
zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int	error = 0;
	uint64_t links;
	uint64_t atime[2], mtime[2], ctime[2];
	xvattr_t *xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
	xoptattr_t *xoap = NULL;
	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	sa_bulk_attr_t bulk[3];
	int count = 0;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);

	if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
	 * Also, if we are the owner don't bother, since owner should
	 * always be allowed to read basic attributes of file.
	 */
	if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
	    (vap->va_uid != crgetuid(cr))) {
		if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
		    skipaclchk, cr))) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	/*
	 * Return all attributes.  It's cheaper to provide the answer
	 * than to determine whether we were asked the question.
	 */

	mutex_enter(&zp->z_lock);
	vap->va_type = vn_mode_to_vtype(zp->z_mode);
	vap->va_mode = zp->z_mode;
	vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
	vap->va_nodeid = zp->z_id;
	if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
		links = ZTOI(zp)->i_nlink + 1;
	else
		links = ZTOI(zp)->i_nlink;
	vap->va_nlink = MIN(links, ZFS_LINK_MAX);
	vap->va_size = i_size_read(ip);
	vap->va_rdev = ip->i_rdev;
	vap->va_seq = ip->i_generation;

	/*
	 * Add in any requested optional attributes and the create time.
	 * Also set the corresponding bits in the returned attribute bitmap.
	 */
	if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
		if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
			xoap->xoa_archive =
			    ((zp->z_pflags & ZFS_ARCHIVE) != 0);
			XVA_SET_RTN(xvap, XAT_ARCHIVE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
			xoap->xoa_readonly =
			    ((zp->z_pflags & ZFS_READONLY) != 0);
			XVA_SET_RTN(xvap, XAT_READONLY);
		}

		if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
			xoap->xoa_system =
			    ((zp->z_pflags & ZFS_SYSTEM) != 0);
			XVA_SET_RTN(xvap, XAT_SYSTEM);
		}

		if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
			xoap->xoa_hidden =
			    ((zp->z_pflags & ZFS_HIDDEN) != 0);
			XVA_SET_RTN(xvap, XAT_HIDDEN);
		}

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			xoap->xoa_nounlink =
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0);
			XVA_SET_RTN(xvap, XAT_NOUNLINK);
		}

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			xoap->xoa_immutable =
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
			XVA_SET_RTN(xvap, XAT_IMMUTABLE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			xoap->xoa_appendonly =
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0);
			XVA_SET_RTN(xvap, XAT_APPENDONLY);
		}

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			xoap->xoa_nodump =
			    ((zp->z_pflags & ZFS_NODUMP) != 0);
			XVA_SET_RTN(xvap, XAT_NODUMP);
		}

		if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
			xoap->xoa_opaque =
			    ((zp->z_pflags & ZFS_OPAQUE) != 0);
			XVA_SET_RTN(xvap, XAT_OPAQUE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			xoap->xoa_av_quarantined =
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
			XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			xoap->xoa_av_modified =
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
			XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
		    S_ISREG(ip->i_mode)) {
			zfs_sa_get_scanstamp(zp, xvap);
		}

		if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
			uint64_t times[2];

			(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
			    times, sizeof (times));
			ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
			XVA_SET_RTN(xvap, XAT_CREATETIME);
		}

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
			XVA_SET_RTN(xvap, XAT_REPARSE);
		}
		if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
			xoap->xoa_generation = ip->i_generation;
			XVA_SET_RTN(xvap, XAT_GEN);
		}

		if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
			xoap->xoa_offline =
			    ((zp->z_pflags & ZFS_OFFLINE) != 0);
			XVA_SET_RTN(xvap, XAT_OFFLINE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
			xoap->xoa_sparse =
			    ((zp->z_pflags & ZFS_SPARSE) != 0);
			XVA_SET_RTN(xvap, XAT_SPARSE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
			xoap->xoa_projinherit =
			    ((zp->z_pflags & ZFS_PROJINHERIT) != 0);
			XVA_SET_RTN(xvap, XAT_PROJINHERIT);
		}

		if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
			xoap->xoa_projid = zp->z_projid;
			XVA_SET_RTN(xvap, XAT_PROJID);
		}
	}

	ZFS_TIME_DECODE(&vap->va_atime, atime);
	ZFS_TIME_DECODE(&vap->va_mtime, mtime);
	ZFS_TIME_DECODE(&vap->va_ctime, ctime);

	mutex_exit(&zp->z_lock);

	sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);

	if (zp->z_blksz == 0) {
		/*
		 * Block size hasn't been set; suggest maximal I/O transfers.
		 */
		vap->va_blksize = zfsvfs->z_max_blksz;
	}

	ZFS_EXIT(zfsvfs);
	return (0);
}
/*
 * Get the basic file attributes and place them in the provided kstat
 * structure.  The inode is assumed to be the authoritative source
 * for most of the attributes.  However, the znode currently has the
 * authoritative atime, blksize, and block count.
 *
 *	IN:	ip	- inode of file.
 *
 *	OUT:	sp	- kstat values.
 *
 *	RETURN:	0 (always succeeds)
 */
2667 zfs_getattr_fast(struct inode
*ip
, struct kstat
*sp
)
2669 znode_t
*zp
= ITOZ(ip
);
2670 zfsvfs_t
*zfsvfs
= ITOZSB(ip
);
2672 u_longlong_t nblocks
;
2677 mutex_enter(&zp
->z_lock
);
2679 generic_fillattr(ip
, sp
);
2681 sa_object_size(zp
->z_sa_hdl
, &blksize
, &nblocks
);
2682 sp
->blksize
= blksize
;
2683 sp
->blocks
= nblocks
;
2685 if (unlikely(zp
->z_blksz
== 0)) {
2687 * Block size hasn't been set; suggest maximal I/O transfers.
2689 sp
->blksize
= zfsvfs
->z_max_blksz
;
2692 mutex_exit(&zp
->z_lock
);
2695 * Required to prevent NFS client from detecting different inode
2696 * numbers of snapshot root dentry before and after snapshot mount.
2698 if (zfsvfs
->z_issnap
) {
2699 if (ip
->i_sb
->s_root
->d_inode
== ip
)
2700 sp
->ino
= ZFSCTL_INO_SNAPDIRS
-
2701 dmu_objset_id(zfsvfs
->z_os
);
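
/*
 * Usage sketch (illustrative, not part of the original source): the split
 * of authority described above is visible from user space through stat(2).
 * st_blksize and st_blocks reflect the SA-derived values filled in here,
 * while most other fields come straight from the in-core inode.  The path
 * below is a made-up example.
 *
 *	struct stat st;
 *
 *	if (stat("/tank/fs/file", &st) == 0)
 *		printf("blksize=%ld blocks=%lld\n",
 *		    (long)st.st_blksize, (long long)st.st_blocks);
 */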
/*
 * For the operation of changing a file's user/group/project, we need to
 * handle not only the main object that is assigned to the file directly,
 * but also the ones that are used by the file via the hidden xattr directory.
 *
 * Because the xattr directory may contain many EA entries, it may be
 * impossible to change all of them within the same transaction that changes
 * the main object's user/group/project attributes.  We therefore change them
 * via multiple independent transactions, one by one.  This may not be an
 * ideal solution, but we have no better idea yet.
 */
static int
zfs_setattr_dir(znode_t *dzp)
{
	struct inode	*dxip = ZTOI(dzp);
	struct inode	*xip = NULL;
	zfsvfs_t	*zfsvfs = ITOZSB(dxip);
	objset_t	*os = zfsvfs->z_os;
	zap_cursor_t	zc;
	zap_attribute_t	zap;
	zfs_dirlock_t	*dl;
	znode_t		*zp;
	dmu_tx_t	*tx = NULL;
	uint64_t	uid, gid;
	sa_bulk_attr_t	bulk[4];
	int		count;
	int		err;

	zap_cursor_init(&zc, os, dzp->z_id);
	while ((err = zap_cursor_retrieve(&zc, &zap)) == 0) {
		count = 0;
		if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
			err = ENXIO;
			break;
		}

		err = zfs_dirent_lock(&dl, dzp, (char *)zap.za_name, &zp,
		    ZEXISTS, NULL, NULL);
		if (err == ENOENT)
			goto next;
		if (err)
			break;

		xip = ZTOI(zp);
		if (KUID_TO_SUID(xip->i_uid) == KUID_TO_SUID(dxip->i_uid) &&
		    KGID_TO_SGID(xip->i_gid) == KGID_TO_SGID(dxip->i_gid) &&
		    zp->z_projid == dzp->z_projid)
			goto next;

		tx = dmu_tx_create(os);
		if (!(zp->z_pflags & ZFS_PROJID))
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		else
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);

		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err)
			break;

		mutex_enter(&dzp->z_lock);

		if (KUID_TO_SUID(xip->i_uid) != KUID_TO_SUID(dxip->i_uid)) {
			xip->i_uid = dxip->i_uid;
			uid = zfs_uid_read(dxip);
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
			    &uid, sizeof (uid));
		}

		if (KGID_TO_SGID(xip->i_gid) != KGID_TO_SGID(dxip->i_gid)) {
			xip->i_gid = dxip->i_gid;
			gid = zfs_gid_read(dxip);
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
			    &gid, sizeof (gid));
		}

		if (zp->z_projid != dzp->z_projid) {
			if (!(zp->z_pflags & ZFS_PROJID)) {
				zp->z_pflags |= ZFS_PROJID;
				SA_ADD_BULK_ATTR(bulk, count,
				    SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags,
				    sizeof (zp->z_pflags));
			}

			zp->z_projid = dzp->z_projid;
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PROJID(zfsvfs),
			    NULL, &zp->z_projid, sizeof (zp->z_projid));
		}

		mutex_exit(&dzp->z_lock);

		if (likely(count > 0)) {
			err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
			dmu_tx_commit(tx);
		} else {
			dmu_tx_abort(tx);
		}
		tx = NULL;
		if (err != 0 && err != ENOENT)
			break;

next:
		if (xip) {
			iput(xip);
			xip = NULL;
			zfs_dirent_unlock(dl);
		}
		zap_cursor_advance(&zc);
	}

	if (tx)
		dmu_tx_abort(tx);
	if (xip) {
		iput(xip);
		zfs_dirent_unlock(dl);
	}
	zap_cursor_fini(&zc);

	return (err == ENOENT ? 0 : err);
}
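
/*
 * The per-entry transaction pattern used by zfs_setattr_dir() above,
 * reduced to its skeleton (a sketch; an objset "os" and a held SA handle
 * "hdl" are assumed from context):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_sa(tx, hdl, B_FALSE);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *	} else {
 *		... stage SA changes with SA_ADD_BULK_ATTR(), then ...
 *		err = sa_bulk_update(hdl, bulk, count, tx);
 *		dmu_tx_commit(tx);
 *	}
 *
 * Running one such transaction per xattr keeps each dirty set small, at
 * the cost of losing atomicity across the whole xattr directory.
 */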
/*
 * Set the file attributes to the values contained in the
 * vattr structure.
 *
 *	IN:	ip	- inode of file to be modified.
 *		vap	- new attribute values.
 *			  If ATTR_XVATTR set, then optional attrs are being set
 *		flags	- ATTR_UTIME set if non-default time values provided.
 *			- ATTR_NOACLCHECK (CIFS context only).
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime updated, mtime updated if size changed.
 */
/* ARGSUSED */
int
zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);
	objset_t	*os = zfsvfs->z_os;
	zilog_t		*zilog;
	dmu_tx_t	*tx;
	vattr_t		oldva;
	xvattr_t	*tmpxvattr;
	uint_t		mask = vap->va_mask;
	uint_t		saved_mask = 0;
	int		trim_mask = 0;
	uint64_t	new_mode;
	uint64_t	new_kuid = 0, new_kgid = 0, new_uid, new_gid;
	uint64_t	xattr_obj;
	uint64_t	mtime[2], ctime[2], atime[2];
	uint64_t	projid = ZFS_INVALID_PROJID;
	znode_t		*attrzp;
	int		need_policy = FALSE;
	int		err, err2 = 0;
	zfs_fuid_info_t *fuidp = NULL;
	xvattr_t	*xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
	xoptattr_t	*xoap;
	zfs_acl_t	*aclp;
	boolean_t	skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	boolean_t	fuid_dirtied = B_FALSE;
	boolean_t	handle_eadir = B_FALSE;
	sa_bulk_attr_t	*bulk, *xattr_bulk;
	int		count = 0, xattr_count = 0, bulks = 8;

	if (mask == 0)
		return (0);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/*
	 * If this is a xvattr_t, then get a pointer to the structure of
	 * optional attributes.  If this is NULL, then we have a vattr_t.
	 */
	xoap = xva_getxoptattr(xvap);
	if (xoap != NULL && (mask & ATTR_XVATTR)) {
		if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
			if (!dmu_objset_projectquota_enabled(os) ||
			    (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode))) {
				ZFS_EXIT(zfsvfs);
				return (SET_ERROR(ENOTSUP));
			}

			projid = xoap->xoa_projid;
			if (unlikely(projid == ZFS_INVALID_PROJID)) {
				ZFS_EXIT(zfsvfs);
				return (SET_ERROR(EINVAL));
			}

			if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
				projid = ZFS_INVALID_PROJID;
			else
				need_policy = TRUE;
		}

		if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
		    (xoap->xoa_projinherit !=
		    ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
		    (!dmu_objset_projectquota_enabled(os) ||
		    (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode)))) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(ENOTSUP));
		}
	}

	zilog = zfsvfs->z_log;

	/*
	 * Make sure that if we have ephemeral uid/gid or xvattr specified
	 * that file system is at proper version level
	 */
	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
	    ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
	    (mask & ATTR_XVATTR))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EISDIR));
	}

	if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
	xva_init(tmpxvattr);

	bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
	xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);

	/*
	 * Immutable files can only alter immutable bit and atime
	 */
	if ((zp->z_pflags & ZFS_IMMUTABLE) &&
	    ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
	    ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
		err = SET_ERROR(EPERM);
		goto out3;
	}

	if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
		err = SET_ERROR(EPERM);
		goto out3;
	}

	/*
	 * Verify timestamps don't overflow 32 bits.
	 * ZFS can handle large timestamps, but 32bit syscalls can't
	 * handle times greater than 2039.  This check should be removed
	 * once large timestamps are fully supported.
	 */
	if (mask & (ATTR_ATIME | ATTR_MTIME)) {
		if (((mask & ATTR_ATIME) &&
		    TIMESPEC_OVERFLOW(&vap->va_atime)) ||
		    ((mask & ATTR_MTIME) &&
		    TIMESPEC_OVERFLOW(&vap->va_mtime))) {
			err = SET_ERROR(EOVERFLOW);
			goto out3;
		}
	}

top:
	attrzp = NULL;
	aclp = NULL;

	/* Can this be moved to before the top label? */
	if (zfs_is_readonly(zfsvfs)) {
		err = SET_ERROR(EROFS);
		goto out3;
	}

	/*
	 * First validate permissions
	 */
	if (mask & ATTR_SIZE) {
		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
		if (err)
			goto out3;

		/*
		 * XXX - Note, we are not providing any open
		 * mode flags here (like FNDELAY), so we may
		 * block if there are locks present... this
		 * should be addressed in openat().
		 */
		/* XXX - would it be OK to generate a log record here? */
		err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
		if (err)
			goto out3;
	}

	if (mask & (ATTR_ATIME|ATTR_MTIME) ||
	    ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
	    XVA_ISSET_REQ(xvap, XAT_READONLY) ||
	    XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
	    XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
	    XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
	    XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
	    XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
		need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
		    skipaclchk, cr);
	}

	if (mask & (ATTR_UID|ATTR_GID)) {
		int	idmask = (mask & (ATTR_UID|ATTR_GID));
		int	take_owner;
		int	take_group;

		/*
		 * NOTE: even if a new mode is being set,
		 * we may clear S_ISUID/S_ISGID bits.
		 */

		if (!(mask & ATTR_MODE))
			vap->va_mode = zp->z_mode;

		/*
		 * Take ownership or chgrp to group we are a member of
		 */

		take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
		take_group = (mask & ATTR_GID) &&
		    zfs_groupmember(zfsvfs, vap->va_gid, cr);

		/*
		 * If both ATTR_UID and ATTR_GID are set then take_owner and
		 * take_group must both be set in order to allow taking
		 * ownership.
		 *
		 * Otherwise, send the check through secpolicy_vnode_setattr()
		 */

		if (((idmask == (ATTR_UID|ATTR_GID)) &&
		    take_owner && take_group) ||
		    ((idmask == ATTR_UID) && take_owner) ||
		    ((idmask == ATTR_GID) && take_group)) {
			if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
			    skipaclchk, cr) == 0) {
				/*
				 * Remove setuid/setgid for non-privileged users
				 */
				(void) secpolicy_setid_clear(vap, cr);
				trim_mask = (mask & (ATTR_UID|ATTR_GID));
			} else {
				need_policy = TRUE;
			}
		} else {
			need_policy = TRUE;
		}
	}

	mutex_enter(&zp->z_lock);
	oldva.va_mode = zp->z_mode;
	zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
	if (mask & ATTR_XVATTR) {
		/*
		 * Update xvattr mask to include only those attributes
		 * that are actually changing.
		 *
		 * the bits will be restored prior to actually setting
		 * the attributes so the caller thinks they were set.
		 */
		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			if (xoap->xoa_appendonly !=
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_APPENDONLY);
				XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
			if (xoap->xoa_projinherit !=
			    ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
				XVA_SET_REQ(tmpxvattr, XAT_PROJINHERIT);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			if (xoap->xoa_nounlink !=
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_NOUNLINK);
				XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			if (xoap->xoa_immutable !=
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
				XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			if (xoap->xoa_nodump !=
			    ((zp->z_pflags & ZFS_NODUMP) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_NODUMP);
				XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			if (xoap->xoa_av_modified !=
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
				XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			if ((!S_ISREG(ip->i_mode) &&
			    xoap->xoa_av_quarantined) ||
			    xoap->xoa_av_quarantined !=
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
				XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			mutex_exit(&zp->z_lock);
			err = SET_ERROR(EPERM);
			goto out3;
		}

		if (need_policy == FALSE &&
		    (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
		    XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
			need_policy = TRUE;
		}
	}

	mutex_exit(&zp->z_lock);

	if (mask & ATTR_MODE) {
		if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
			err = secpolicy_setid_setsticky_clear(ip, vap,
			    &oldva, cr);
			if (err)
				goto out3;

			trim_mask |= ATTR_MODE;
		} else {
			need_policy = TRUE;
		}
	}

	if (need_policy) {
		/*
		 * If trim_mask is set then take ownership
		 * has been granted or write_acl is present and user
		 * has the ability to modify mode.  In that case remove
		 * UID|GID and or MODE from mask so that
		 * secpolicy_vnode_setattr() doesn't revoke it.
		 */

		if (trim_mask) {
			saved_mask = vap->va_mask;
			vap->va_mask &= ~trim_mask;
		}
		err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
		    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
		if (err)
			goto out3;

		if (trim_mask)
			vap->va_mask |= saved_mask;
	}

	/*
	 * secpolicy_vnode_setattr, or take ownership may have
	 * changed va_mask
	 */
	mask = vap->va_mask;

	if ((mask & (ATTR_UID | ATTR_GID)) || projid != ZFS_INVALID_PROJID) {
		handle_eadir = B_TRUE;
		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
		    &xattr_obj, sizeof (xattr_obj));

		if (err == 0 && xattr_obj) {
			err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
			if (err)
				goto out2;
		}
		if (mask & ATTR_UID) {
			new_kuid = zfs_fuid_create(zfsvfs,
			    (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
			if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
			    zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
			    new_kuid)) {
				if (attrzp)
					iput(ZTOI(attrzp));
				err = SET_ERROR(EDQUOT);
				goto out2;
			}
		}

		if (mask & ATTR_GID) {
			new_kgid = zfs_fuid_create(zfsvfs,
			    (uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
			if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
			    zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
			    new_kgid)) {
				if (attrzp)
					iput(ZTOI(attrzp));
				err = SET_ERROR(EDQUOT);
				goto out2;
			}
		}

		if (projid != ZFS_INVALID_PROJID &&
		    zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
			if (attrzp)
				iput(ZTOI(attrzp));
			err = SET_ERROR(EDQUOT);
			goto out2;
		}
	}
	tx = dmu_tx_create(os);

	if (mask & ATTR_MODE) {
		uint64_t pmode = zp->z_mode;
		uint64_t acl_obj;

		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);

		zfs_acl_chmod_setattr(zp, &aclp, new_mode);

		mutex_enter(&zp->z_lock);
		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
			/*
			 * Are we upgrading ACL from old V0 format
			 * to V1 format?
			 */
			if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
			    zfs_znode_acl_version(zp) ==
			    ZFS_ACL_VERSION_INITIAL) {
				dmu_tx_hold_free(tx, acl_obj, 0,
				    DMU_OBJECT_END);
				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
				    0, aclp->z_acl_bytes);
			} else {
				dmu_tx_hold_write(tx, acl_obj, 0,
				    aclp->z_acl_bytes);
			}
		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, aclp->z_acl_bytes);
		}
		mutex_exit(&zp->z_lock);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	} else {
		if (((mask & ATTR_XVATTR) &&
		    XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
		    (projid != ZFS_INVALID_PROJID &&
		    !(zp->z_pflags & ZFS_PROJID)))
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		else
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	}

	if (attrzp) {
		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
	}

	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);

	zfs_sa_upgrade_txholds(tx, zp);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err)
		goto out;

	count = 0;
	/*
	 * Set each attribute requested.
	 * We group settings according to the locks they need to acquire.
	 *
	 * Note: you cannot set ctime directly, although it will be
	 * updated as a side-effect of calling this function.
	 */

	if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
		/*
		 * For an existing object upgraded from an old system, its
		 * on-disk layout has no slot for the project ID attribute.
		 * But quota accounting logic needs to access related slots by
		 * offset directly.  So we need to adjust such old objects'
		 * layout to place the project ID at a unified, fixed offset.
		 */
		if (attrzp)
			err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
		if (err == 0)
			err = sa_add_projid(zp->z_sa_hdl, tx, projid);

		if (unlikely(err == EEXIST))
			err = 0;
		else if (err != 0)
			goto out;
		else
			projid = ZFS_INVALID_PROJID;
	}

	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
		mutex_enter(&zp->z_acl_lock);
	mutex_enter(&zp->z_lock);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));

	if (attrzp) {
		if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
			mutex_enter(&attrzp->z_acl_lock);
		mutex_enter(&attrzp->z_lock);
		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
		    SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
		    sizeof (attrzp->z_pflags));
		if (projid != ZFS_INVALID_PROJID) {
			attrzp->z_projid = projid;
			SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
			    SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
			    sizeof (attrzp->z_projid));
		}
	}

	if (mask & (ATTR_UID|ATTR_GID)) {

		if (mask & ATTR_UID) {
			ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
			new_uid = zfs_uid_read(ZTOI(zp));
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
			    &new_uid, sizeof (new_uid));
			if (attrzp) {
				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_UID(zfsvfs), NULL, &new_uid,
				    sizeof (new_uid));
				ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
			}
		}

		if (mask & ATTR_GID) {
			ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
			new_gid = zfs_gid_read(ZTOI(zp));
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
			    NULL, &new_gid, sizeof (new_gid));
			if (attrzp) {
				SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
				    SA_ZPL_GID(zfsvfs), NULL, &new_gid,
				    sizeof (new_gid));
				ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
			}
		}
		if (!(mask & ATTR_MODE)) {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
			    NULL, &new_mode, sizeof (new_mode));
			new_mode = zp->z_mode;
		}
		err = zfs_acl_chown_setattr(zp);
		ASSERT(err == 0);
		if (attrzp) {
			err = zfs_acl_chown_setattr(attrzp);
			ASSERT(err == 0);
		}
	}

	if (mask & ATTR_MODE) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
		    &new_mode, sizeof (new_mode));
		zp->z_mode = ZTOI(zp)->i_mode = new_mode;
		ASSERT3P(aclp, !=, NULL);
		err = zfs_aclset_common(zp, aclp, cr, tx);
		ASSERT0(err);
		if (zp->z_acl_cached)
			zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = aclp;
		aclp = NULL;
	}

	if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
		zp->z_atime_dirty = 0;
		ZFS_TIME_ENCODE(&ip->i_atime, atime);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
		    &atime, sizeof (atime));
	}

	if (mask & (ATTR_MTIME | ATTR_SIZE)) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
		ZTOI(zp)->i_mtime = zpl_inode_timespec_trunc(vap->va_mtime,
		    ZTOI(zp)->i_sb->s_time_gran);

		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
		    mtime, sizeof (mtime));
	}

	if (mask & (ATTR_CTIME | ATTR_SIZE)) {
		ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
		ZTOI(zp)->i_ctime = zpl_inode_timespec_trunc(vap->va_ctime,
		    ZTOI(zp)->i_sb->s_time_gran);
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    ctime, sizeof (ctime));
	}

	if (projid != ZFS_INVALID_PROJID) {
		zp->z_projid = projid;
		SA_ADD_BULK_ATTR(bulk, count,
		    SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
		    sizeof (zp->z_projid));
	}

	if (attrzp && mask) {
		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
		    SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
		    sizeof (ctime));
	}

	/*
	 * Do this after setting timestamps to prevent timestamp
	 * update from toggling bit
	 */

	if (xoap && (mask & ATTR_XVATTR)) {

		/*
		 * restore trimmed off masks
		 * so that return masks can be set for caller.
		 */

		if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
			XVA_SET_REQ(xvap, XAT_APPENDONLY);
		}
		if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
			XVA_SET_REQ(xvap, XAT_NOUNLINK);
		}
		if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
			XVA_SET_REQ(xvap, XAT_IMMUTABLE);
		}
		if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
			XVA_SET_REQ(xvap, XAT_NODUMP);
		}
		if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
			XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
		}
		if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
			XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
		}
		if (XVA_ISSET_REQ(tmpxvattr, XAT_PROJINHERIT)) {
			XVA_SET_REQ(xvap, XAT_PROJINHERIT);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
			ASSERT(S_ISREG(ip->i_mode));

		zfs_xvattr_set(zp, xvap, tx);
	}

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	if (mask != 0)
		zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);

	mutex_exit(&zp->z_lock);
	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
		mutex_exit(&zp->z_acl_lock);

	if (attrzp) {
		if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
			mutex_exit(&attrzp->z_acl_lock);
		mutex_exit(&attrzp->z_lock);
	}
out:
	if (err == 0 && xattr_count > 0) {
		err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
		    xattr_count, tx);
		ASSERT(err2 == 0);
	}

	if (aclp)
		zfs_acl_free(aclp);

	if (fuidp) {
		zfs_fuid_info_free(fuidp);
		fuidp = NULL;
	}

	if (err) {
		dmu_tx_abort(tx);
		if (attrzp)
			iput(ZTOI(attrzp));
		if (err == ERESTART)
			goto top;
	} else {
		if (count > 0)
			err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
		dmu_tx_commit(tx);
		if (attrzp) {
			if (err2 == 0 && handle_eadir)
				err2 = zfs_setattr_dir(attrzp);
			iput(ZTOI(attrzp));
		}
		zfs_inode_update(zp);
	}

out2:
	if (os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

out3:
	kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
	kmem_free(bulk, sizeof (sa_bulk_attr_t) * bulks);
	kmem_free(tmpxvattr, sizeof (xvattr_t));
	ZFS_EXIT(zfsvfs);
	return (err);
}
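
/*
 * The SA bulk-update idiom that zfs_setattr() relies on throughout, shown
 * in isolation (a sketch; "zfsvfs", "zp", and an assigned "tx" are assumed
 * from context):
 *
 *	sa_bulk_attr_t bulk[2];
 *	int count = 0;
 *	uint64_t ctime[2];
 *
 *	ZFS_TIME_ENCODE(&ZTOI(zp)->i_ctime, ctime);
 *	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
 *	    &ctime, sizeof (ctime));
 *	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
 *	    &zp->z_pflags, sizeof (zp->z_pflags));
 *	err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
 *
 * Staging attributes with SA_ADD_BULK_ATTR() and flushing them with a
 * single sa_bulk_update() applies all the changes in one SA operation.
 */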
typedef struct zfs_zlock {
	krwlock_t	*zl_rwlock;	/* lock we acquired */
	znode_t		*zl_znode;	/* znode we held */
	struct zfs_zlock *zl_next;	/* next in list */
} zfs_zlock_t;

/*
 * Drop locks and release vnodes that were held by zfs_rename_lock().
 */
static void
zfs_rename_unlock(zfs_zlock_t **zlpp)
{
	zfs_zlock_t *zl;

	while ((zl = *zlpp) != NULL) {
		if (zl->zl_znode != NULL)
			zfs_iput_async(ZTOI(zl->zl_znode));
		rw_exit(zl->zl_rwlock);
		*zlpp = zl->zl_next;
		kmem_free(zl, sizeof (*zl));
	}
}
/*
 * Search back through the directory tree, using the ".." entries.
 * Lock each directory in the chain to prevent concurrent renames.
 * Fail any attempt to move a directory into one of its own descendants.
 * XXX - z_parent_lock can overlap with map or grow locks
 */
static int
zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
{
	zfs_zlock_t	*zl;
	znode_t		*zp = tdzp;
	uint64_t	rootid = ZTOZSB(zp)->z_root;
	uint64_t	oidp = zp->z_id;
	krwlock_t	*rwlp = &szp->z_parent_lock;
	krw_t		rw = RW_WRITER;

	/*
	 * First pass write-locks szp and compares to zp->z_id.
	 * Later passes read-lock zp and compare to zp->z_parent.
	 */
	do {
		if (!rw_tryenter(rwlp, rw)) {
			/*
			 * Another thread is renaming in this path.
			 * Note that if we are a WRITER, we don't have any
			 * parent_locks held yet.
			 */
			if (rw == RW_READER && zp->z_id > szp->z_id) {
				/*
				 * Drop our locks and restart
				 */
				zfs_rename_unlock(&zl);
				*zlpp = NULL;
				zp = tdzp;
				oidp = zp->z_id;
				rwlp = &szp->z_parent_lock;
				rw = RW_WRITER;
				continue;
			} else {
				/*
				 * Wait for other thread to drop its locks
				 */
				rw_enter(rwlp, rw);
			}
		}

		zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
		zl->zl_rwlock = rwlp;
		zl->zl_znode = NULL;
		zl->zl_next = *zlpp;
		*zlpp = zl;

		if (oidp == szp->z_id)	/* We're a descendant of szp */
			return (SET_ERROR(EINVAL));

		if (oidp == rootid)	/* We've hit the top */
			return (0);

		if (rw == RW_READER) {	/* i.e. not the first pass */
			int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
			if (error)
				return (error);
			zl->zl_znode = zp;
		}

		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
		    &oidp, sizeof (oidp));
		rwlp = &zp->z_parent_lock;
		rw = RW_READER;

	} while (zp->z_id != sdzp->z_id);

	return (0);
}
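
/*
 * Worked example (illustrative): a request to rename /a/b into /a/b/c/d
 * must fail.  With szp = b and tdzp = c, the loop above walks the ".."
 * chain starting at c: the first pass records c, the second pass reaches
 * b, at which point oidp == szp->z_id and EINVAL is returned before any
 * entry can be moved into its own subtree.
 */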
/*
 * Move an entry from the provided source directory to the target
 * directory.  Change the entry name as indicated.
 *
 *	IN:	sdip	- Source directory containing the "old entry".
 *		snm	- Old entry name.
 *		tdip	- Target directory to contain the "new entry".
 *		tnm	- New entry name.
 *		cr	- credentials of caller.
 *		flags	- case flags
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	sdip,tdip - ctime|mtime updated
 */
/*ARGSUSED*/
int
zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
    cred_t *cr, int flags)
{
	znode_t		*tdzp, *szp, *tzp;
	znode_t		*sdzp = ITOZ(sdip);
	zfsvfs_t	*zfsvfs = ITOZSB(sdip);
	zilog_t		*zilog;
	zfs_dirlock_t	*sdl, *tdl;
	dmu_tx_t	*tx;
	zfs_zlock_t	*zl;
	int		cmp, serr, terr;
	int		error = 0;
	int		zflg = 0;
	boolean_t	waited = B_FALSE;

	if (snm == NULL || tnm == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(sdzp);
	zilog = zfsvfs->z_log;

	tdzp = ITOZ(tdip);
	ZFS_VERIFY_ZP(tdzp);

	/*
	 * We check i_sb because snapshots and the ctldir must have different
	 * super blocks.
	 */
	if (tdip->i_sb != sdip->i_sb || zfsctl_is_node(tdip)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EXDEV));
	}

	if (zfsvfs->z_utf8 && u8_validate(tnm,
	    strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}

	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;

top:
	szp = NULL;
	tzp = NULL;
	zl = NULL;

	/*
	 * This is to prevent the creation of links into attribute space
	 * by renaming a linked file into/outof an attribute directory.
	 * See the comment in zfs_link() for why this is considered bad.
	 */
	if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Lock source and target directory entries.  To prevent deadlock,
	 * a lock ordering must be defined.  We lock the directory with
	 * the smallest object id first, or if it's a tie, the one with
	 * the lexically first name.
	 */
	if (sdzp->z_id < tdzp->z_id) {
		cmp = -1;
	} else if (sdzp->z_id > tdzp->z_id) {
		cmp = 1;
	} else {
		/*
		 * First compare the two name arguments without
		 * considering any case folding.
		 */
		int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);

		cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
		ASSERT(error == 0 || !zfsvfs->z_utf8);
		if (cmp == 0) {
			/*
			 * POSIX: "If the old argument and the new argument
			 * both refer to links to the same existing file,
			 * the rename() function shall return successfully
			 * and perform no other action."
			 */
			ZFS_EXIT(zfsvfs);
			return (0);
		}
		/*
		 * If the file system is case-folding, then we may
		 * have some more checking to do.  A case-folding file
		 * system is either supporting mixed case sensitivity
		 * access or is completely case-insensitive.  Note
		 * that the file system is always case preserving.
		 *
		 * In mixed sensitivity mode case sensitive behavior
		 * is the default.  FIGNORECASE must be used to
		 * explicitly request case insensitive behavior.
		 *
		 * If the source and target names provided differ only
		 * by case (e.g., a request to rename 'tim' to 'Tim'),
		 * we will treat this as a special case in the
		 * case-insensitive mode: as long as the source name
		 * is an exact match, we will allow this to proceed as
		 * a name-change request.
		 */
		if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
		    (zfsvfs->z_case == ZFS_CASE_MIXED &&
		    flags & FIGNORECASE)) &&
		    u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
		    &error) == 0) {
			/*
			 * case preserving rename request, require exact
			 * name matches
			 */
			zflg |= ZCIEXACT;
			zflg &= ~ZCILOOK;
		}
	}

	/*
	 * If the source and destination directories are the same, we should
	 * grab the z_name_lock of that directory only once.
	 */
	if (sdzp == tdzp) {
		zflg |= ZHAVELOCK;
		rw_enter(&sdzp->z_name_lock, RW_READER);
	}

	if (cmp < 0) {
		serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
		    ZEXISTS | zflg, NULL, NULL);
		terr = zfs_dirent_lock(&tdl,
		    tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
	} else {
		terr = zfs_dirent_lock(&tdl,
		    tdzp, tnm, &tzp, zflg, NULL, NULL);
		serr = zfs_dirent_lock(&sdl,
		    sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
		    NULL, NULL);
	}

	if (serr) {
		/*
		 * Source entry invalid or not there.
		 */
		if (!terr) {
			zfs_dirent_unlock(tdl);
			if (tzp)
				iput(ZTOI(tzp));
		}

		if (sdzp == tdzp)
			rw_exit(&sdzp->z_name_lock);

		if (strcmp(snm, "..") == 0)
			serr = EINVAL;
		ZFS_EXIT(zfsvfs);
		return (serr);
	}
	if (terr) {
		zfs_dirent_unlock(sdl);
		iput(ZTOI(szp));

		if (sdzp == tdzp)
			rw_exit(&sdzp->z_name_lock);

		if (strcmp(tnm, "..") == 0)
			terr = EINVAL;
		ZFS_EXIT(zfsvfs);
		return (terr);
	}

	/*
	 * If we are using project inheritance, meaning the directory has
	 * ZFS_PROJINHERIT set, then its descendant directories will inherit
	 * not only the project ID, but also the ZFS_PROJINHERIT flag.  In
	 * such a case, we only allow renames into our tree when the project
	 * IDs are the same.
	 */
	if (tdzp->z_pflags & ZFS_PROJINHERIT &&
	    tdzp->z_projid != szp->z_projid) {
		error = SET_ERROR(EXDEV);
		goto out;
	}

	/*
	 * Must have write access at the source to remove the old entry
	 * and write access at the target to create the new entry.
	 * Note that if target and source are the same, this can be
	 * done in a single check.
	 */

	if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
		goto out;

	if (S_ISDIR(ZTOI(szp)->i_mode)) {
		/*
		 * Check to make sure rename is valid.
		 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
		 */
		if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
			goto out;
	}

	/*
	 * Does target exist?
	 */
	if (tzp) {
		/*
		 * Source and target must be the same type.
		 */
		if (S_ISDIR(ZTOI(szp)->i_mode)) {
			if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
				error = SET_ERROR(ENOTDIR);
				goto out;
			}
		} else {
			if (S_ISDIR(ZTOI(tzp)->i_mode)) {
				error = SET_ERROR(EISDIR);
				goto out;
			}
		}
		/*
		 * POSIX dictates that when the source and target
		 * entries refer to the same file object, rename
		 * must do nothing and exit without error.
		 */
		if (szp->z_id == tzp->z_id) {
			error = 0;
			goto out;
		}
	}

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
	if (sdzp != tdzp) {
		dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, tdzp);
	}
	if (tzp) {
		dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, tzp);
	}

	zfs_sa_upgrade_txholds(tx, szp);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		if (zl != NULL)
			zfs_rename_unlock(&zl);
		zfs_dirent_unlock(sdl);
		zfs_dirent_unlock(tdl);

		if (sdzp == tdzp)
			rw_exit(&sdzp->z_name_lock);

		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			iput(ZTOI(szp));
			if (tzp)
				iput(ZTOI(tzp));
			goto top;
		}
		dmu_tx_abort(tx);
		iput(ZTOI(szp));
		if (tzp)
			iput(ZTOI(tzp));
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (tzp)	/* Attempt to remove the existing target */
		error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);

	if (error == 0) {
		error = zfs_link_create(tdl, szp, tx, ZRENAMING);
		if (error == 0) {
			szp->z_pflags |= ZFS_AV_MODIFIED;
			if (tdzp->z_pflags & ZFS_PROJINHERIT)
				szp->z_pflags |= ZFS_PROJINHERIT;

			error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
			    (void *)&szp->z_pflags, sizeof (uint64_t), tx);
			ASSERT0(error);

			error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
			if (error == 0) {
				zfs_log_rename(zilog, tx, TX_RENAME |
				    (flags & FIGNORECASE ? TX_CI : 0), sdzp,
				    sdl->dl_name, tdzp, tdl->dl_name, szp);
			} else {
				/*
				 * At this point, we have successfully created
				 * the target name, but have failed to remove
				 * the source name.  Since the create was done
				 * with the ZRENAMING flag, there are
				 * complications; for one, the link count is
				 * wrong.  The easiest way to deal with this
				 * is to remove the newly created target, and
				 * return the original error.  This must
				 * succeed; fortunately, it is very unlikely to
				 * fail, since we just created it.
				 */
				VERIFY3U(zfs_link_destroy(tdl, szp, tx,
				    ZRENAMING, NULL), ==, 0);
			}
		} else {
			/*
			 * If we had removed the existing target, a subsequent
			 * zfs_link_create() call to add back the same entry,
			 * but with a new dnode (szp), should not fail.
			 */
			ASSERT(tzp == NULL);
		}
	}

	dmu_tx_commit(tx);
out:
	if (zl != NULL)
		zfs_rename_unlock(&zl);

	zfs_dirent_unlock(sdl);
	zfs_dirent_unlock(tdl);

	zfs_inode_update(sdzp);
	if (sdzp == tdzp)
		rw_exit(&sdzp->z_name_lock);

	if (sdzp != tdzp)
		zfs_inode_update(tdzp);

	zfs_inode_update(szp);
	iput(ZTOI(szp));
	if (tzp) {
		zfs_inode_update(tzp);
		iput(ZTOI(tzp));
	}

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
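
/*
 * The deadlock-avoidance rule used when locking the two directory entries
 * above, restated as a standalone predicate (a sketch; name_cmp is the
 * u8_strcmp() result computed in zfs_rename()):
 *
 *	static boolean_t
 *	zfs_rename_lock_source_first(znode_t *sdzp, znode_t *tdzp,
 *	    int name_cmp)
 *	{
 *		if (sdzp->z_id != tdzp->z_id)
 *			return (sdzp->z_id < tdzp->z_id);
 *		return (name_cmp < 0);
 *	}
 *
 * Because every rename acquires the two locks in this single global
 * order, two concurrent renames touching the same pair of directories
 * can never deadlock on them.
 */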
/*
 * Insert the indicated symbolic reference entry into the directory.
 *
 *	IN:	dip	- Directory to contain new symbolic link.
 *		name	- Name for new symlink entry.
 *		vap	- Attributes of new entry.
 *		link	- Target path of new symlink.
 *		cr	- credentials of caller.
 *		flags	- case flags
 *
 *	OUT:	ipp	- Inode of new symbolic link.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dip - ctime|mtime updated
 */
/*ARGSUSED*/
int
zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
    struct inode **ipp, cred_t *cr, int flags)
{
	znode_t		*zp, *dzp = ITOZ(dip);
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	zfsvfs_t	*zfsvfs = ITOZSB(dip);
	zilog_t		*zilog;
	uint64_t	len = strlen(link);
	int		error;
	int		zflg = ZNEW;
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	uint64_t	txtype = TX_SYMLINK;
	boolean_t	waited = B_FALSE;

	ASSERT(S_ISLNK(vap->va_mode));

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}
	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;

	if (len > MAXPATHLEN) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENAMETOOLONG));
	}

	if ((error = zfs_acl_ids_create(dzp, 0,
	    vap, cr, NULL, &acl_ids)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
top:
	*ipp = NULL;

	/*
	 * Attempt to lock directory; fail if entry already exists.
	 */
	error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
	if (error) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EDQUOT));
	}
	tx = dmu_tx_create(zfsvfs->z_os);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE + len);
	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * Create a new object for the symlink.
	 * for version 4 ZPL datasets the symlink will be an SA attribute
	 */
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	mutex_enter(&zp->z_lock);
	if (zp->z_is_sa)
		error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
		    link, len, tx);
	else
		zfs_sa_symlink(zp, link, len, tx);
	mutex_exit(&zp->z_lock);

	zp->z_size = len;
	(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
	    &zp->z_size, sizeof (zp->z_size), tx);
	/*
	 * Insert the new object into the directory.
	 */
	error = zfs_link_create(dl, zp, tx, ZNEW);
	if (error != 0) {
		zfs_znode_delete(zp, tx);
		remove_inode_hash(ZTOI(zp));
	} else {
		if (flags & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);

		zfs_inode_update(dzp);
		zfs_inode_update(zp);
	}

	zfs_acl_ids_free(&acl_ids);

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (error == 0) {
		*ipp = ZTOI(zp);

		if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
			zil_commit(zilog, 0);
	} else {
		iput(ZTOI(zp));
	}

	ZFS_EXIT(zfsvfs);
	return (error);
}
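
/*
 * Usage sketch (illustrative, not part of the original source): this entry
 * point backs symlink(2); the paths below are made up.
 *
 *	symlink("/tank/fs/target", "/tank/fs/link");
 *
 * For ZPL version 4 and later datasets the target string lands in the
 * SA_ZPL_SYMLINK system attribute; older datasets store it via
 * zfs_sa_symlink() instead.
 */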
/*
 * Return, in the buffer contained in the provided uio structure,
 * the symbolic path referred to by ip.
 *
 *	IN:	ip	- inode of symbolic link
 *		uio	- structure to contain the link path.
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - atime updated
 */
/* ARGSUSED */
int
zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);
	int		error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	mutex_enter(&zp->z_lock);
	if (zp->z_is_sa)
		error = sa_lookup_uio(zp->z_sa_hdl,
		    SA_ZPL_SYMLINK(zfsvfs), uio);
	else
		error = zfs_sa_readlink(zp, uio);
	mutex_exit(&zp->z_lock);

	ZFS_EXIT(zfsvfs);
	return (error);
}
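
/*
 * Usage sketch (illustrative, not part of the original source): this is the
 * backing implementation for readlink(2), e.g.
 *
 *	char buf[PATH_MAX];
 *	ssize_t n = readlink("/tank/fs/link", buf, sizeof (buf) - 1);
 *
 *	if (n >= 0)
 *		buf[n] = '\0';
 *
 * The uio handed in by the VFS layer is filled either from the
 * SA_ZPL_SYMLINK attribute or from the symlink's data block.
 */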
/*
 * Insert a new entry into directory tdip referencing sip.
 *
 *	IN:	tdip	- Directory to contain new entry.
 *		sip	- inode of new entry.
 *		name	- name of new entry.
 *		cr	- credentials of caller.
 *		flags	- case flags.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	tdip - ctime|mtime updated
 *	 sip - ctime updated
 */
/* ARGSUSED */
int
zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr,
    int flags)
{
	znode_t		*dzp = ITOZ(tdip);
	znode_t		*tzp, *szp;
	zfsvfs_t	*zfsvfs = ITOZSB(tdip);
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	int		zf = ZNEW;
	uint64_t	parent;
	uid_t		owner;
	boolean_t	waited = B_FALSE;
	boolean_t	is_tmpfile = 0;
	uint64_t	txg;
#ifdef HAVE_TMPFILE
	is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
#endif
	ASSERT(S_ISDIR(tdip->i_mode));

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	/*
	 * POSIX dictates that we return EPERM here.
	 * Better choices include ENOTSUP or EISDIR.
	 */
	if (S_ISDIR(sip->i_mode)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	szp = ITOZ(sip);
	ZFS_VERIFY_ZP(szp);

	/*
	 * If we are using project inheritance, meaning the directory has
	 * ZFS_PROJINHERIT set, then its descendant directories will inherit
	 * not only the project ID, but also the ZFS_PROJINHERIT flag.  In
	 * such a case, we only allow hard link creation in our tree when the
	 * project IDs are the same.
	 */
	if (dzp->z_pflags & ZFS_PROJINHERIT && dzp->z_projid != szp->z_projid) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EXDEV));
	}

	/*
	 * We check i_sb because snapshots and the ctldir must have different
	 * super blocks.
	 */
	if (sip->i_sb != tdip->i_sb || zfsctl_is_node(sip)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EXDEV));
	}

	/* Prevent links to .zfs/shares files */

	if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (uint64_t))) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	if (parent == zfsvfs->z_shares_dir) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	if (zfsvfs->z_utf8 && u8_validate(name,
	    strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}
	if (flags & FIGNORECASE)
		zf |= ZCILOOK;

	/*
	 * We do not support links between attributes and non-attributes
	 * because of the potential security risk of creating links
	 * into "normal" file space in order to circumvent restrictions
	 * imposed in attribute space.
	 */
	if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
	    cr, ZFS_OWNER);
	if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

top:
	/*
	 * Attempt to lock directory; fail if entry already exists.
	 */
	error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
	if (error) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
	if (is_tmpfile)
		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	zfs_sa_upgrade_txholds(tx, szp);
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	/* unmark z_unlinked so zfs_link_create will not reject */
	if (is_tmpfile)
		szp->z_unlinked = 0;
	error = zfs_link_create(dl, szp, tx, 0);

	if (error == 0) {
		uint64_t txtype = TX_LINK;
		/*
		 * tmpfile is created to be in z_unlinkedobj, so remove it.
		 * Also, we don't log in the ZIL, because all previous file
		 * operations on the tmpfile are ignored by the ZIL.  Instead
		 * we always wait for the txg to sync to make sure all
		 * previous operations are sync safe.
		 */
		if (is_tmpfile) {
			VERIFY(zap_remove_int(zfsvfs->z_os,
			    zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
		} else {
			if (flags & FIGNORECASE)
				txtype |= TX_CI;
			zfs_log_link(zilog, tx, txtype, dzp, szp, name);
		}
	} else if (is_tmpfile) {
		/* restore z_unlinked since linking failed */
		szp->z_unlinked = 1;
	}
	txg = dmu_tx_get_txg(tx);
	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	if (is_tmpfile)
		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);

	zfs_inode_update(dzp);
	zfs_inode_update(szp);
	ZFS_EXIT(zfsvfs);
	return (error);
}
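
/*
 * Usage sketch (illustrative, not part of the original source): the
 * is_tmpfile path above corresponds to materializing an O_TMPFILE inode
 * with linkat(2), e.g.
 *
 *	int fd = open("/tank/fs", O_TMPFILE | O_WRONLY, 0600);
 *	... write to fd ...
 *	linkat(fd, "", AT_FDCWD, "/tank/fs/file", AT_EMPTY_PATH);
 *
 * Because none of the earlier writes were logged in the ZIL, durability
 * of the new link comes from txg_wait_synced() rather than zil_commit().
 */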
static void
zfs_putpage_commit_cb(void *arg)
{
	struct page *pp = arg;

	ClearPageError(pp);
	end_page_writeback(pp);
}
/*
 * Push a page out to disk, once the page is on stable storage the
 * registered commit callback will be run as notification of completion.
 *
 *	IN:	ip	- page mapped for inode.
 *		pp	- page to push (page is locked)
 *		wbc	- writeback control data
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated
 */
/* ARGSUSED */
int
zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
{
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);
	loff_t		offset;
	loff_t		pgoff;
	unsigned int	pglen;
	dmu_tx_t	*tx;
	caddr_t		va;
	int		err = 0;
	uint64_t	mtime[2], ctime[2];
	sa_bulk_attr_t	bulk[3];
	int		cnt = 0;
	struct address_space *mapping;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	ASSERT(PageLocked(pp));

	pgoff = page_offset(pp);	/* Page byte-offset in file */
	offset = i_size_read(ip);	/* File length in bytes */
	pglen = MIN(PAGE_SIZE,		/* Page length in bytes */
	    P2ROUNDUP(offset, PAGE_SIZE)-pgoff);

	/* Page is beyond end of file */
	if (pgoff >= offset) {
		unlock_page(pp);
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/* Truncate page length to end of file */
	if (pgoff + pglen > offset)
		pglen = offset - pgoff;

	/*
	 * FIXME: Allow mmap writes past its quota.  The correct fix
	 * is to register a page_mkwrite() handler to count the page
	 * against its quota when it is about to be dirtied.
	 */
	if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
	    KUID_TO_SUID(ip->i_uid)) ||
	    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
	    KGID_TO_SGID(ip->i_gid)) ||
	    (zp->z_projid != ZFS_DEFAULT_PROJID &&
	    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
	    zp->z_projid))) {
		err = EDQUOT;
	}

	/*
	 * The ordering here is critical and must adhere to the following
	 * rules in order to avoid deadlocking in either zfs_read() or
	 * zfs_free_range() due to a lock inversion.
	 *
	 * 1) The page must be unlocked prior to acquiring the range lock.
	 *    This is critical because zfs_read() calls find_lock_page()
	 *    which may block on the page lock while holding the range lock.
	 *
	 * 2) Before setting or clearing write back on a page the range lock
	 *    must be held in order to prevent a lock inversion with the
	 *    zfs_free_range() function.
	 *
	 * This presents a problem because upon entering this function the
	 * page lock is already held.  To safely acquire the range lock the
	 * page lock must be dropped.  This creates a window where another
	 * process could truncate, invalidate, dirty, or write out the page.
	 *
	 * Therefore, after successfully reacquiring the range and page locks
	 * the current page state is checked.  In the common case everything
	 * will be as is expected and it can be written out.  However, if
	 * the page state has changed it must be handled accordingly.
	 */
	mapping = pp->mapping;
	redirty_page_for_writepage(wbc, pp);
	unlock_page(pp);

	locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
	    pgoff, pglen, RL_WRITER);
	lock_page(pp);

	/* Page mapping changed or it was no longer dirty, we're done */
	if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
		unlock_page(pp);
		rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/* Another process started write block if required */
	if (PageWriteback(pp)) {
		unlock_page(pp);
		rangelock_exit(lr);

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(pp);

		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/* Clear the dirty flag; the required locks are held */
	if (!clear_page_dirty_for_io(pp)) {
		unlock_page(pp);
		rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/*
	 * Counterpart for redirty_page_for_writepage() above.  This page
	 * was in fact not skipped and should not be counted as if it were.
	 */
	wbc->pages_skipped--;
	set_page_writeback(pp);
	unlock_page(pp);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);

	err = dmu_tx_assign(tx, TXG_NOWAIT);
	if (err != 0) {
		if (err == ERESTART)
			dmu_tx_wait(tx);

		dmu_tx_abort(tx);
		__set_page_dirty_nobuffers(pp);
		ClearPageError(pp);
		end_page_writeback(pp);
		rangelock_exit(lr);
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	va = kmap(pp);
	ASSERT3U(pglen, <=, PAGE_SIZE);
	dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
	kunmap(pp);

	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);

	/* Preserve the mtime and ctime provided by the inode */
	ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
	ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
	zp->z_atime_dirty = 0;

	err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);

	zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
	    zfs_putpage_commit_cb, pp);
	dmu_tx_commit(tx);

	rangelock_exit(lr);

	if (wbc->sync_mode != WB_SYNC_NONE) {
		/*
		 * Note that this is rarely called under writepages(), because
		 * writepages() normally handles the entire commit for
		 * performance reasons.
		 */
		zil_commit(zfsvfs->z_log, zp->z_id);
	}

	ZFS_EXIT(zfsvfs);
	return (err);
}
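
/*
 * The lock juggling performed above, condensed (a sketch of the ordering
 * only; revalidation details omitted):
 *
 *	unlock_page(pp);			step 1: drop the page lock
 *	lr = rangelock_enter(&zp->z_rangelock,
 *	    pgoff, pglen, RL_WRITER);		step 2: take the range lock
 *	lock_page(pp);				step 3: retake the page lock
 *	... recheck mapping, dirty, and writeback state ...
 *
 * Taking the locks in any other order can invert against zfs_read() or
 * zfs_free_range() and deadlock.
 */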
/*
 * Update the system attributes when the inode has been dirtied.  For the
 * moment we only update the mode, atime, mtime, and ctime.
 */
int
zfs_dirty_inode(struct inode *ip, int flags)
{
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);
	dmu_tx_t	*tx;
	uint64_t	mode, atime[2], mtime[2], ctime[2];
	sa_bulk_attr_t	bulk[4];
	int		error = 0;
	int		cnt = 0;

	if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
		return (0);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

#ifdef I_DIRTY_TIME
	/*
	 * This is the lazytime semantic introduced in Linux 4.0
	 * This flag will only be called from update_time when lazytime is set.
	 * (Note, I_DIRTY_SYNC will also set if not lazytime)
	 * Fortunately mtime and ctime are managed within ZFS itself, so we
	 * only need to dirty atime.
	 */
	if (flags == I_DIRTY_TIME) {
		zp->z_atime_dirty = 1;
		goto out;
	}
#endif

	tx = dmu_tx_create(zfsvfs->z_os);

	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		goto out;
	}

	mutex_enter(&zp->z_lock);
	zp->z_atime_dirty = 0;

	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);

	/* Preserve the mode, mtime and ctime provided by the inode */
	ZFS_TIME_ENCODE(&ip->i_atime, atime);
	ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
	ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
	mode = ip->i_mode;

	zp->z_mode = mode;

	error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
	mutex_exit(&zp->z_lock);

	dmu_tx_commit(tx);
out:
	ZFS_EXIT(zfsvfs);
	return (error);
}
void
zfs_inactive(struct inode *ip)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	uint64_t atime[2];
	int error;
	int need_unlock = 0;

	/* Only read lock if we haven't already write locked, e.g. rollback */
	if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
		need_unlock = 1;
		rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
	}
	if (zp->z_sa_hdl == NULL) {
		if (need_unlock)
			rw_exit(&zfsvfs->z_teardown_inactive_lock);
		return;
	}

	if (zp->z_atime_dirty && zp->z_unlinked == 0) {
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
		} else {
			ZFS_TIME_ENCODE(&ip->i_atime, atime);
			mutex_enter(&zp->z_lock);
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
			    (void *)&atime, sizeof (atime), tx);
			zp->z_atime_dirty = 0;
			mutex_exit(&zp->z_lock);
			dmu_tx_commit(tx);
		}
	}

	zfs_zinactive(zp);
	if (need_unlock)
		rw_exit(&zfsvfs->z_teardown_inactive_lock);
}
/*
 * Bounds-check the seek operation.
 *
 *	IN:	ip	- inode seeking within
 *		ooff	- old file offset
 *		noffp	- pointer to new file offset
 *		ct	- caller context
 *
 *	RETURN:	0 if success
 *		EINVAL if new offset invalid
 */
/* ARGSUSED */
int
zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
{
	if (S_ISDIR(ip->i_mode))
		return (0);
	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
/*
 * Fill pages with data from the disk.
 */
static int
zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	objset_t *os;
	struct page *cur_pp;
	u_offset_t io_off, total;
	size_t io_len;
	loff_t i_size;
	unsigned page_idx;
	int err;

	os = zfsvfs->z_os;
	io_len = nr_pages << PAGE_SHIFT;
	i_size = i_size_read(ip);
	io_off = page_offset(pl[0]);

	if (io_off + io_len > i_size)
		io_len = i_size - io_off;

	/*
	 * Iterate over list of pages and read each page individually.
	 */
	page_idx = 0;
	for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
		caddr_t va;

		cur_pp = pl[page_idx++];
		va = kmap(cur_pp);
		err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
		    DMU_READ_PREFETCH);
		kunmap(cur_pp);
		if (err) {
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}
	}

	return (0);
}
/*
 * Uses zfs_fillpage to read data from the file and fill the pages.
 *
 *	IN:	ip	 - inode of file to get data from.
 *		pl	 - list of pages to read
 *		nr_pages - number of pages to read
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	vp - atime updated
 */
/* ARGSUSED */
int
zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
{
	znode_t	 *zp  = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int	 err;

	if (pl == NULL)
		return (0);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	err = zfs_fillpage(ip, pl, nr_pages);

	ZFS_EXIT(zfsvfs);
	return (err);
}
/*
 * Check ZFS specific permissions to memory map a section of a file.
 *
 *	IN:	ip	- inode of the file to mmap
 *		off	- file offset
 *		addrp	- start address in memory region
 *		len	- length of memory region
 *		vm_flags- address flags
 *
 *	RETURN:	0 if success
 *		error code if failure
 */
/*ARGSUSED*/
int
zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
    unsigned long vm_flags)
{
	znode_t  *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((vm_flags & VM_WRITE) && (zp->z_pflags &
	    (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	if ((vm_flags & (VM_READ | VM_EXEC)) &&
	    (zp->z_pflags & ZFS_AV_QUARANTINED)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EACCES));
	}

	if (off < 0 || len > MAXOFFSET_T - off) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENXIO));
	}

	ZFS_EXIT(zfsvfs);
	return (0);
}
/*
 * convoff - converts the given data (start, whence) to the
 * given whence.
 */
int
convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
{
	vattr_t vap;
	int error;

	if ((lckdat->l_whence == 2) || (whence == 2)) {
		if ((error = zfs_getattr(ip, &vap, 0, CRED())))
			return (error);
	}

	switch (lckdat->l_whence) {
	case 1:
		lckdat->l_start += offset;
		break;
	case 2:
		lckdat->l_start += vap.va_size;
		/* FALLTHROUGH */
	case 0:
		break;
	default:
		return (SET_ERROR(EINVAL));
	}

	if (lckdat->l_start < 0)
		return (SET_ERROR(EINVAL));

	switch (whence) {
	case 1:
		lckdat->l_start -= offset;
		break;
	case 2:
		lckdat->l_start -= vap.va_size;
		/* FALLTHROUGH */
	case 0:
		break;
	default:
		return (SET_ERROR(EINVAL));
	}

	lckdat->l_whence = (short)whence;
	return (0);
}
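
/*
 * Worked example (illustrative): for a 1000-byte file, a flock64_t with
 * l_whence == 2 (relative to EOF) and l_start == -10, converted by
 * convoff() to whence == 0, comes back with l_start == 990 and
 * l_whence == 0, i.e. an absolute range starting 10 bytes before the
 * end of the file.
 */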
/*
 * Free or allocate space in a file.  Currently, this function only
 * supports the `F_FREESP' command.  However, this command is somewhat
 * misnamed, as its functionality includes the ability to allocate as
 * well as free space.
 *
 *	IN:	ip	- inode of file to free data in.
 *		cmd	- action to take (only F_FREESP supported).
 *		bfp	- section of file to free/alloc.
 *		flag	- current file open mode flags.
 *		offset	- current file offset.
 *		cr	- credentials of caller [UNUSED].
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	ip - ctime|mtime updated
 */
/* ARGSUSED */
int
zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
    offset_t offset, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);
	uint64_t	off, len;
	int		error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (cmd != F_FREESP) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Callers might not be able to detect properly that we are read-only,
	 * so check it explicitly here.
	 */
	if (zfs_is_readonly(zfsvfs)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EROFS));
	}

	if ((error = convoff(ip, bfp, 0, offset))) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (bfp->l_len < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Permissions aren't checked on Solaris because on this OS
	 * zfs_space() can only be called with an opened file handle.
	 * On Linux we can get here through truncate_range() which
	 * operates directly on inodes, so we need to check access rights.
	 */
	if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	off = bfp->l_start;
	len = bfp->l_len; /* 0 means from off to end of file */

	error = zfs_freesp(zp, off, len, flag, TRUE);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*ARGSUSED*/
int
zfs_fid(struct inode *ip, fid_t *fidp)
{
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);
	uint32_t	gen;
	uint64_t	gen64;
	uint64_t	object = zp->z_id;
	zfid_short_t	*zfid;
	int		size, i, error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
	    &gen64, sizeof (uint64_t))) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	gen = (uint32_t)gen64;

	size = SHORT_FID_LEN;

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = size;

	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* Must have a non-zero generation number to distinguish from .zfs */
	if (gen == 0)
		gen = 1;
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));

	ZFS_EXIT(zfsvfs);
	return (0);
}
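
/*
 * Worked example (illustrative): for object number 0x1234 the loop above
 * stores zf_object[] least-significant byte first, i.e. {0x34, 0x12, 0, ...}.
 * A decoder simply reverses the shift:
 *
 *	uint64_t object = 0;
 *	int i;
 *
 *	for (i = 0; i < sizeof (zfid->zf_object); i++)
 *		object |= (uint64_t)zfid->zf_object[i] << (8 * i);
 */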
/*ARGSUSED*/
int
zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
	ZFS_EXIT(zfsvfs);

	return (error);
}
/*ARGSUSED*/
int
zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	zilog_t	*zilog = zfsvfs->z_log;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	error = zfs_setacl(zp, vsecp, skipaclchk, cr);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
#ifdef HAVE_UIO_ZEROCOPY
/*
 * Tunable, both must be a power of 2.
 *
 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
 * zcr_blksz_max: if set to less than the file block size, allow loaning out
 *		of an arcbuf for a partial block read
 */
int zcr_blksz_min = (1 << 10);	/* 1K */
int zcr_blksz_max = (1 << 17);	/* 128K */

/*ARGSUSED*/
static int
zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int max_blksz = zfsvfs->z_max_blksz;
	uio_t *uio = &xuio->xu_uio;
	ssize_t size = uio->uio_resid;
	offset_t offset = uio->uio_loffset;
	int blksz;
	int fullblk, i;
	arc_buf_t *abuf;
	ssize_t maxsize;
	int preamble, postamble;

	if (xuio->xu_type != UIOTYPE_ZEROCOPY)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	switch (ioflag) {
	case UIO_WRITE:
		/*
		 * Loan out an arc_buf for write if write size is bigger than
		 * max_blksz, and the file's block size is also max_blksz.
		 */
		blksz = max_blksz;
		if (size < blksz || zp->z_blksz != blksz) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EINVAL));
		}
		/*
		 * Caller requests buffers for write before knowing where the
		 * write offset might be (e.g. NFS TCP write).
		 */
		if (offset == -1) {
			preamble = 0;
		} else {
			preamble = P2PHASE(offset, blksz);
			if (preamble) {
				preamble = blksz - preamble;
				size -= preamble;
			}
		}

		postamble = P2PHASE(size, blksz);
		size -= postamble;

		fullblk = size / blksz;
		(void) dmu_xuio_init(xuio,
		    (preamble != 0) + fullblk + (postamble != 0));

		/*
		 * Have to fix iov base/len for partial buffers.  They
		 * currently represent full arc_buf's.
		 */
		if (preamble) {
			/* data begins in the middle of the arc_buf */
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    blksz);
			ASSERT(abuf);
			(void) dmu_xuio_add(xuio, abuf,
			    blksz - preamble, preamble);
		}

		for (i = 0; i < fullblk; i++) {
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    blksz);
			ASSERT(abuf);
			(void) dmu_xuio_add(xuio, abuf, 0, blksz);
		}

		if (postamble) {
			/* data ends in the middle of the arc_buf */
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    blksz);
			ASSERT(abuf);
			(void) dmu_xuio_add(xuio, abuf, 0, postamble);
		}
		break;
	case UIO_READ:
		/*
		 * Loan out an arc_buf for read if the read size is larger than
		 * the current file block size.  Block alignment is not
		 * considered.  Partial arc_buf will be loaned out for read.
		 */
		blksz = zp->z_blksz;
		if (blksz < zcr_blksz_min)
			blksz = zcr_blksz_min;
		if (blksz > zcr_blksz_max)
			blksz = zcr_blksz_max;
		/* avoid potential complexity of dealing with it */
		if (blksz > max_blksz) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EINVAL));
		}

		maxsize = zp->z_size - uio->uio_loffset;
		if (size > maxsize)
			size = maxsize;

		if (size < blksz) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EINVAL));
		}
		break;
	default:
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	uio->uio_extflg = UIO_XUIO;
	XUIO_XUZC_RW(xuio) = ioflag;
	ZFS_EXIT(zfsvfs);
	return (0);
}

/*ARGSUSED*/
static int
zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
{
	int i;
	arc_buf_t *abuf;
	int ioflag = XUIO_XUZC_RW(xuio);

	ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);

	i = dmu_xuio_cnt(xuio);
	while (i-- > 0) {
		abuf = dmu_xuio_arcbuf(xuio, i);
		/*
		 * if abuf == NULL, it must be a write buffer
		 * that has been returned in zfs_write().
		 */
		if (abuf)
			dmu_return_arcbuf(abuf);
		ASSERT(abuf || ioflag == UIO_WRITE);
	}

	dmu_xuio_fini(xuio);
	return (0);
}
#endif /* HAVE_UIO_ZEROCOPY */
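
/*
 * Worked example (illustrative) for the UIO_WRITE sizing math above:
 * with blksz == 128K, offset == 96K, and size == 512K, the preamble is
 * 128K - P2PHASE(96K, 128K) == 32K, leaving 480K; the postamble is
 * P2PHASE(480K, 128K) == 96K, leaving 384K; and fullblk == 384K / 128K
 * == 3.  dmu_xuio_init() is therefore sized for 1 + 3 + 1 == 5 arc_bufs.
 */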
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_open);
EXPORT_SYMBOL(zfs_close);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_lookup);
EXPORT_SYMBOL(zfs_create);
EXPORT_SYMBOL(zfs_tmpfile);
EXPORT_SYMBOL(zfs_remove);
EXPORT_SYMBOL(zfs_mkdir);
EXPORT_SYMBOL(zfs_rmdir);
EXPORT_SYMBOL(zfs_readdir);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_getattr);
EXPORT_SYMBOL(zfs_getattr_fast);
EXPORT_SYMBOL(zfs_setattr);
EXPORT_SYMBOL(zfs_rename);
EXPORT_SYMBOL(zfs_symlink);
EXPORT_SYMBOL(zfs_readlink);
EXPORT_SYMBOL(zfs_link);
EXPORT_SYMBOL(zfs_inactive);
EXPORT_SYMBOL(zfs_space);
EXPORT_SYMBOL(zfs_fid);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);
EXPORT_SYMBOL(zfs_getpage);
EXPORT_SYMBOL(zfs_putpage);
EXPORT_SYMBOL(zfs_dirty_inode);
EXPORT_SYMBOL(zfs_map);

/* CSTYLED */
module_param(zfs_delete_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
module_param(zfs_read_chunk_size, long, 0644);
MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
#endif