/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/uio_impl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_crypt.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/policy.h>
#include <sys/zfeature.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
	int error = 0;
	zfsvfs_t *zfsvfs = ZTOZSB(zp);

	(void) syncflag, (void) cr;

	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
		if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
			return (error);
		atomic_inc_32(&zp->z_sync_writes_cnt);
		zil_commit(zfsvfs->z_log, zp->z_id);
		atomic_dec_32(&zp->z_sync_writes_cnt);
		zfs_exit(zfsvfs, FTAG);
	}
	return (error);
}
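
/*
 * Illustrative sketch (not compiled here): how a platform fsync entry
 * point might call into zfs_fsync().  "my_znode" and "my_cred" are
 * hypothetical placeholders.
 *
 *	int error = zfs_fsync(my_znode, 0, my_cred);
 *
 * With sync=disabled on the dataset the call returns 0 without touching
 * the ZIL; otherwise it blocks in zil_commit() until the znode's
 * intent-log records are on stable storage.
 */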
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
 */
static int
zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
{
	zfs_locked_range_t *lr;
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (SET_ERROR(ENXIO));
	}

	if (cmd == F_SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	/* Flush any mmap()'d data to disk */
	if (zn_has_cached_data(zp, 0, file_sz - 1))
		zn_flush_cached_data(zp, B_FALSE);

	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_READER);
	error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
	zfs_rangelock_exit(lr);

	if (error == ESRCH)
		return (SET_ERROR(ENXIO));

	/* File was dirty, so fall back to using generic logic */
	if (error == EBUSY) {
		if (hole)
			*off = file_sz;
		return (0);
	}

	/*
	 * We could find a hole that begins after the logical end-of-file,
	 * because dmu_offset_next() only works on whole blocks.  If the
	 * EOF falls mid-block, then indicate that the "virtual hole"
	 * at the end of the file begins at the logical EOF, rather than
	 * at the end of the last block.
	 */
	if (noff > file_sz) {
		ASSERT(hole);
		noff = file_sz;
	}

	if (noff < (uint64_t)*off)
		return (error);
	*off = noff;
	return (error);
}

int
zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	error = zfs_holey_common(zp, cmd, off);

	zfs_exit(zfsvfs, FTAG);
	return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
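
/*
 * Illustrative sketch (userland, not part of this file's build): how a
 * caller reaches zfs_holey() through lseek(2).  The file path is
 * hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/tank/file", O_RDONLY);
 *	off_t data = lseek(fd, 0, SEEK_DATA);    // first data at/after 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // next hole at/after data
 *
 * An ENXIO from lseek() corresponds to the offset being at or past EOF,
 * matching the ENXIO returns above.
 */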
int
zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	if (flag & V_ACE_MASK)
#if defined(__linux__)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
		    zfs_init_idmap);
#else
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
		    NULL);
#endif
	else
#if defined(__linux__)
		error = zfs_zaccess_rwx(zp, mode, flag, cr, zfs_init_idmap);
#else
		error = zfs_zaccess_rwx(zp, mode, flag, cr, NULL);
#endif

	zfs_exit(zfsvfs, FTAG);
	return (error);
}
static uint64_t zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */

/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	zp	- inode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- O_SYNC flags; used to provide FRSYNC semantics.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	inode - atime updated if byte count > 0
 */
int
zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
	(void) cr;
	int error = 0;
	boolean_t frsync = B_FALSE;

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EACCES));
	}

	/* We don't copy out anything useful for directories. */
	if (Z_ISDIR(ZTOTYPE(zp))) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EISDIR));
	}

	/*
	 * Validate file offset
	 */
	if (zfs_uio_offset(uio) < (offset_t)0) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Fasttrack empty reads
	 */
	if (zfs_uio_resid(uio) == 0) {
		zfs_exit(zfsvfs, FTAG);
		return (0);
	}

#ifdef FRSYNC
	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 * Only do this for non-snapshots.
	 *
	 * Some platforms do not support FRSYNC and instead map it
	 * to O_SYNC, which results in unnecessary calls to zil_commit. We
	 * only honor FRSYNC requests on platforms which support it.
	 */
	frsync = !!(ioflag & FRSYNC);
#endif
	if (zfsvfs->z_log &&
	    (frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
		zil_commit(zfsvfs->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
	    zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (zfs_uio_offset(uio) >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(zfs_uio_offset(uio) < zp->z_size);
#if defined(__linux__)
	ssize_t start_offset = zfs_uio_offset(uio);
#endif
	ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
	ssize_t start_resid = n;

	while (n > 0) {
		ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
		    P2PHASE(zfs_uio_offset(uio), zfs_vnops_read_chunk_size));
#ifdef UIO_NOCOPY
		if (zfs_uio_segflg(uio) == UIO_NOCOPY)
			error = mappedread_sf(zp, nbytes, uio);
		else
#endif
		if (zn_has_cached_data(zp, zfs_uio_offset(uio),
		    zfs_uio_offset(uio) + nbytes - 1) && !(ioflag & O_DIRECT)) {
			error = mappedread(zp, nbytes, uio);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes);
		}

		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);

#if defined(__linux__)
			/*
			 * if we actually read some bytes, bubbling EFAULT
			 * up to become EAGAIN isn't what we want here...
			 *
			 * ...on Linux, at least. On FBSD, doing this breaks.
			 */
			if (error == EFAULT &&
			    (zfs_uio_offset(uio) - start_offset) != 0)
				error = 0;
#endif
			break;
		}

		n -= nbytes;
	}

	int64_t nread = start_resid - n;
	dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
	task_io_account_read(nread);
out:
	zfs_rangelock_exit(lr);

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
	zfs_exit(zfsvfs, FTAG);
	return (error);
}
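
/*
 * Worked example of the chunking above (illustrative only): with the
 * default zfs_vnops_read_chunk_size of 1 MiB, a 3 MiB read starting at
 * offset 0x180000 (1.5 MiB) first re-aligns to the chunk boundary and
 * then proceeds in whole chunks:
 *
 *	P2PHASE(0x180000, 1 MiB) == 0x80000 (512 KiB into a chunk)
 *	1st chunk: MIN(3 MiB, 1 MiB - 512 KiB) == 512 KiB
 *	2nd..4th:  1 MiB, 1 MiB, 512 KiB (remainder)
 */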
static void
zfs_clear_setid_bits_if_necessary(zfsvfs_t *zfsvfs, znode_t *zp, cred_t *cr,
    uint64_t *clear_setid_bits_txgp, dmu_tx_t *tx)
{
	zilog_t *zilog = zfsvfs->z_log;
	const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));

	ASSERT(clear_setid_bits_txgp != NULL);
	ASSERT(tx != NULL);

	/*
	 * Clear Set-UID/Set-GID bits on successful write if not
	 * privileged and at least one of the execute bits is set.
	 *
	 * It would be nice to do this after all writes have
	 * been done, but that would still expose the ISUID/ISGID
	 * to another app after the partial write is committed.
	 *
	 * Note: we don't call zfs_fuid_map_id() here because
	 * user 0 is not an ephemeral uid.
	 */
	mutex_enter(&zp->z_acl_lock);
	if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6))) != 0 &&
	    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
	    secpolicy_vnode_setid_retain(zp, cr,
	    ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
		uint64_t newmode;

		zp->z_mode &= ~(S_ISUID | S_ISGID);
		newmode = zp->z_mode;
		(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
		    (void *)&newmode, sizeof (uint64_t), tx);

		mutex_exit(&zp->z_acl_lock);

		/*
		 * Make sure SUID/SGID bits will be removed when we replay the
		 * log. If the setid bits keep coming back, don't log more
		 * than one TX_SETATTR per transaction group.
		 */
		if (*clear_setid_bits_txgp != dmu_tx_get_txg(tx)) {
			vattr_t va = {0};

			va.va_mask = ATTR_MODE;
			va.va_nodeid = zp->z_id;
			va.va_mode = newmode;
			zfs_log_setattr(zilog, tx, TX_SETATTR, zp, &va,
			    ATTR_MODE, NULL);
			*clear_setid_bits_txgp = dmu_tx_get_txg(tx);
		}
	} else {
		mutex_exit(&zp->z_acl_lock);
	}
}
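
/*
 * For reference (illustrative arithmetic): the execute-bit mask built
 * above, S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6), is
 * 0100 | 0010 | 0001 == 0111, i.e. "any of user/group/other execute".
 * So a 06755 binary written to by an unprivileged user is demoted to
 * 0755, exactly as a traditional Unix filesystem would do.
 */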
/*
 * Write the bytes to a file.
 *
 *	IN:	zp	- znode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- O_APPEND flag set if in append mode.
 *			  O_DIRECT flag; used to bypass page cache.
 *		cr	- credentials of caller.
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated if byte count > 0
 */
int
zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
	int error = 0, error1;
	ssize_t start_resid = zfs_uio_resid(uio);
	uint64_t clear_setid_bits_txg = 0;

	/*
	 * Fasttrack empty write
	 */
	ssize_t n = start_resid;
	if (n == 0)
		return (0);

	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	sa_bulk_attr_t bulk[4];
	int count = 0;
	uint64_t mtime[2], ctime[2];
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);

	/*
	 * Callers might not be able to detect properly that we are read-only,
	 * so check it explicitly here.
	 */
	if (zfs_is_readonly(zfsvfs)) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EROFS));
	}

	/*
	 * If immutable or not appending then return EPERM.
	 * Intentionally allow ZFS_READONLY through here.
	 * See zfs_zaccess_common()
	 */
	if ((zp->z_pflags & ZFS_IMMUTABLE) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
	    (zfs_uio_offset(uio) < zp->z_size))) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EPERM));
	}

	/*
	 * Validate file offset
	 */
	offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
	if (woff < 0) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Pre-fault the pages to ensure slow (eg NFS) pages
	 * don't hold up txg.
	 */
	ssize_t pfbytes = MIN(n, DMU_MAX_ACCESS >> 1);
	if (zfs_uio_prefaultpages(pfbytes, uio)) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EFAULT));
	}

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	zfs_locked_range_t *lr;
	if (ioflag & O_APPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics.  We reset the write offset once we have the lock.
		 */
		lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
		woff = lr->lr_offset;
		if (lr->lr_length == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		zfs_uio_setoffset(uio, woff);
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
	}

	if (zn_rlimit_fsize_uio(zp, uio)) {
		zfs_rangelock_exit(lr);
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EFBIG));
	}

	const rlim64_t limit = MAXOFFSET_T;

	if (woff >= limit) {
		zfs_rangelock_exit(lr);
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EFBIG));
	}

	if (n > limit - woff)
		n = limit - woff;

	uint64_t end_size = MAX(zp->z_size, woff + n);
	zilog_t *zilog = zfsvfs->z_log;
	boolean_t commit = (ioflag & (O_SYNC | O_DSYNC)) ||
	    (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS);

	const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
	const uint64_t gid = KGID_TO_SGID(ZTOGID(zp));
	const uint64_t projid = zp->z_projid;

	/*
	 * Write the file in reasonable size chunks.  Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (n > 0) {
		woff = zfs_uio_offset(uio);

		if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
		    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
		    (projid != ZFS_DEFAULT_PROJID &&
		    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
		    projid))) {
			error = SET_ERROR(EDQUOT);
			break;
		}

		uint64_t blksz;
		if (lr->lr_length == UINT64_MAX && zp->z_size <= zp->z_blksz) {
			if (zp->z_blksz > zfsvfs->z_max_blksz &&
			    !ISP2(zp->z_blksz)) {
				/*
				 * File's blocksize is already larger than the
				 * "recordsize" property.  Only let it grow to
				 * the next power of 2.
				 */
				blksz = 1 << highbit64(zp->z_blksz);
			} else {
				blksz = zfsvfs->z_max_blksz;
			}
			blksz = MIN(blksz, P2ROUNDUP(end_size,
			    SPA_MINBLOCKSIZE));
			blksz = MAX(blksz, zp->z_blksz);
		} else {
			blksz = zp->z_blksz;
		}

		arc_buf_t *abuf = NULL;
		ssize_t nbytes = n;
		if (n >= blksz && woff >= zp->z_size &&
		    P2PHASE(woff, blksz) == 0 &&
		    (blksz >= SPA_OLD_MAXBLOCKSIZE || n < 4 * blksz)) {
			/*
			 * This write covers a full block.  "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction.  This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == blksz);
			if ((error = zfs_uiocopy(abuf->b_data, blksz,
			    UIO_WRITE, uio, &nbytes))) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT3S(nbytes, ==, blksz);
		} else {
			nbytes = MIN(n, (DMU_MAX_ACCESS >> 1) -
			    P2PHASE(woff, blksz));
			if (pfbytes < nbytes) {
				if (zfs_uio_prefaultpages(nbytes, uio)) {
					error = SET_ERROR(EFAULT);
					break;
				}
				pfbytes = nbytes;
			}
		}

		/*
		 * Start a transaction.
		 */
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
		DB_DNODE_ENTER(db);
		dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff, nbytes);
		DB_DNODE_EXIT(db);
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * NB: We must call zfs_clear_setid_bits_if_necessary before
		 * committing the transaction!
		 */

		/*
		 * If rangelock_enter() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since rangelock_reduce() will
		 * shrink down lr_length to the appropriate size.
		 */
		if (lr->lr_length == UINT64_MAX) {
			zfs_grow_blocksize(zp, blksz, tx);
			zfs_rangelock_reduce(lr, woff, n);
		}

		ssize_t tx_bytes;
		if (abuf == NULL) {
			tx_bytes = zfs_uio_resid(uio);
			zfs_uio_fault_disable(uio, B_TRUE);
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			zfs_uio_fault_disable(uio, B_FALSE);
#ifdef __linux__
			if (error == EFAULT) {
				zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
				    cr, &clear_setid_bits_txg, tx);
				dmu_tx_commit(tx);
				/*
				 * Account for partial writes before
				 * continuing the loop.
				 * Update needs to occur before the next
				 * zfs_uio_prefaultpages, or prefaultpages may
				 * error, and we may break the loop early.
				 */
				n -= tx_bytes - zfs_uio_resid(uio);
				pfbytes -= tx_bytes - zfs_uio_resid(uio);
				continue;
			}
#endif
			/*
			 * On FreeBSD, EFAULT should be propagated back to the
			 * VFS, which will handle faulting and will retry.
			 */
			if (error != 0 && error != EFAULT) {
				zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
				    cr, &clear_setid_bits_txg, tx);
				dmu_tx_commit(tx);
				break;
			}
			tx_bytes -= zfs_uio_resid(uio);
		} else {
			/*
			 * Thus, we're writing a full block at a block-aligned
			 * offset and extending the file past EOF.
			 *
			 * dmu_assign_arcbuf_by_dbuf() will directly assign the
			 * arc buffer to a dbuf.
			 */
			error = dmu_assign_arcbuf_by_dbuf(
			    sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
			if (error != 0) {
				/*
				 * XXX This might not be necessary if
				 * dmu_assign_arcbuf_by_dbuf is guaranteed
				 * to be atomic.
				 */
				zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
				    cr, &clear_setid_bits_txg, tx);
				dmu_return_arcbuf(abuf);
				dmu_tx_commit(tx);
				break;
			}
			ASSERT3S(nbytes, <=, zfs_uio_resid(uio));
			zfs_uioskip(uio, nbytes);
			tx_bytes = nbytes;
		}
		if (tx_bytes &&
		    zn_has_cached_data(zp, woff, woff + tx_bytes - 1) &&
		    !(ioflag & O_DIRECT)) {
			update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
		}

		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		zfs_clear_setid_bits_if_necessary(zfsvfs, zp, cr,
		    &clear_setid_bits_txg, tx);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    zfs_uio_offset(uio));
			ASSERT(error == 0 || error == EFAULT);
		}
		/*
		 * If we are replaying and eof is non zero then force
		 * the file size to the specified eof. Note, there's no
		 * concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;

		error1 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
		if (error1 != 0)
			/* Avoid clobbering EFAULT. */
			error = error1;

		/*
		 * NB: During replay, the TX_SETATTR record logged by
		 * zfs_clear_setid_bits_if_necessary must precede any of
		 * the TX_WRITE records logged here.
		 */
		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, commit,
		    NULL, NULL);

		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT3S(tx_bytes, ==, nbytes);
		n -= nbytes;
		pfbytes -= nbytes;
	}

	zfs_znode_update_vfs(zp);
	zfs_rangelock_exit(lr);

	/*
	 * If we're in replay mode, or we made no progress, or the
	 * uio data is inaccessible return an error.  Otherwise, it's
	 * at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid ||
	    error == EFAULT) {
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	if (commit)
		zil_commit(zilog, zp->z_id);

	const int64_t nwritten = start_resid - zfs_uio_resid(uio);
	dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
	task_io_account_write(nwritten);

	zfs_exit(zfsvfs, FTAG);
	return (0);
}
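
/*
 * Illustrative userland view of the semantics above (hypothetical fd):
 *
 *	ssize_t w = write(fd, buf, len);
 *	// w may be < len: the loop above stops early on e.g. EDQUOT, and
 *	// partial progress is reported as success.  Durability is only
 *	// guaranteed once zil_commit() has run, i.e. with O_SYNC/O_DSYNC
 *	// set (the "commit" flag) or after a later fsync(fd).
 *
 * Each loop iteration commits at most DMU_MAX_ACCESS / 2 bytes of the
 * uio in its own transaction, so a crash mid-write can leave a prefix
 * of the data on disk, never a torn interior range of one transaction.
 */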
int
zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);
	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
	zfs_exit(zfsvfs, FTAG);

	return (error);
}
int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	zilog_t *zilog = zfsvfs->z_log;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	error = zfs_setacl(zp, vsecp, skipaclchk, cr);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	zfs_exit(zfsvfs, FTAG);
	return (error);
}
#ifdef ZFS_DEBUG
static int zil_fault_io = 0;
#endif

static void zfs_get_done(zgd_t *zgd, int error);

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, uint64_t gen, lr_write_t *lr, char *buf,
    struct lwb *lwb, zio_t *zio)
{
	zfsvfs_t *zfsvfs = arg;
	objset_t *os = zfsvfs->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;
	uint64_t zp_gen;

	ASSERT3P(lwb, !=, NULL);
	ASSERT3U(size, !=, 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zfsvfs, object, &zp) != 0)
		return (SET_ERROR(ENOENT));
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		zfs_zrele_async(zp);
		return (SET_ERROR(ENOENT));
	}
	/* check if generation number matches */
	if (sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
	    sizeof (zp_gen)) != 0) {
		zfs_zrele_async(zp);
		return (SET_ERROR(EIO));
	}
	if (zp_gen != gen) {
		zfs_zrele_async(zp);
		return (SET_ERROR(ENOENT));
	}

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_lwb = lwb;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
		    offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = SET_ERROR(ENOENT);
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		ASSERT3P(zio, !=, NULL);
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data. We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		for (;;) {
			uint64_t blkoff;
			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
			    offset, size, RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			zfs_rangelock_exit(zgd->zgd_lr);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = SET_ERROR(ENOENT);
#ifdef ZFS_DEBUG
		if (zil_fault_io) {
			error = SET_ERROR(EIO);
			zil_fault_io = 0;
		}
#endif
		if (error == 0)
			error = dmu_buf_hold_noread(os, object, offset, zgd,
			    &db);

		if (error == 0) {
			blkptr_t *bp = &lr->lr_blkptr;

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= size);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf.  We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == 0)
				return (0);

			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;
				/*
				 * TX_WRITE2 relies on the data previously
				 * written by the TX_WRITE that caused
				 * EALREADY.  We zero out the BP because
				 * it is the old, currently-on-disk BP.
				 */
				zgd->zgd_bp = NULL;
				BP_ZERO(bp);
				error = 0;
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}
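
/*
 * Illustrative note (not a definitive statement of ZIL policy): the
 * immediate vs. indirect choice was already made by the ZIL when it
 * built the lr_write_t; in OpenZFS this is driven by the dataset's
 * logbias property and the zfs_immediate_write_sz tunable.  By the time
 * this callback runs, buf alone tells us which flavor to satisfy:
 *
 *	if (buf != NULL)   // copy the data into the log record itself
 *	else               // dmu_sync() the block and log its blkptr
 */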
static void
zfs_get_done(zgd_t *zgd, int error)
{
	(void) error;
	znode_t *zp = zgd->zgd_private;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_rangelock_exit(zgd->zgd_lr);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	zfs_zrele_async(zp);

	kmem_free(zgd, sizeof (zgd_t));
}
static int
zfs_enter_two(zfsvfs_t *zfsvfs1, zfsvfs_t *zfsvfs2, const char *tag)
{
	int error;

	/* Swap. Not sure if the order of zfs_enter()s is important. */
	if (zfsvfs1 > zfsvfs2) {
		zfsvfs_t *tmpzfsvfs;

		tmpzfsvfs = zfsvfs2;
		zfsvfs2 = zfsvfs1;
		zfsvfs1 = tmpzfsvfs;
	}

	error = zfs_enter(zfsvfs1, tag);
	if (error != 0)
		return (error);
	if (zfsvfs1 != zfsvfs2) {
		error = zfs_enter(zfsvfs2, tag);
		if (error != 0) {
			zfs_exit(zfsvfs1, tag);
			return (error);
		}
	}

	return (0);
}

static void
zfs_exit_two(zfsvfs_t *zfsvfs1, zfsvfs_t *zfsvfs2, const char *tag)
{
	zfs_exit(zfsvfs1, tag);
	if (zfsvfs1 != zfsvfs2)
		zfs_exit(zfsvfs2, tag);
}
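
/*
 * Illustrative usage (hypothetical caller): cloning between two mounted
 * datasets enters both zfsvfs_t's through the helper above.  Because the
 * helper normalizes to pointer order, every caller acquires them in the
 * same order, so the pairwise deadlock of A->B vs. B->A cannot occur:
 *
 *	if ((error = zfs_enter_two(inzfsvfs, outzfsvfs, FTAG)) != 0)
 *		return (error);
 *	// ... work on both datasets ...
 *	zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
 */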
/*
 * We split each clone request in chunks that can fit into a single ZIL
 * log entry. Each ZIL log entry can fit 130816 bytes for a block cloning
 * operation (see zil_max_log_data() and zfs_log_clone_range()). This gives
 * us room for storing 1022 block pointers.
 *
 * On success, the function returns the number of bytes copied in *lenp.
 * Note, it doesn't return how many bytes are left to be copied.
 * On errors which are caused by any file system limitations or
 * brt limitations `EINVAL` is returned. In most cases the user has
 * requested bad parameters; it might be possible to clone the file, but
 * some parameters don't match the requirements.
 */
int
zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
    uint64_t *outoffp, uint64_t *lenp, cred_t *cr)
{
	zfsvfs_t *inzfsvfs, *outzfsvfs;
	objset_t *inos, *outos;
	zfs_locked_range_t *inlr, *outlr;
	dmu_buf_impl_t *db;
	dmu_tx_t *tx;
	zilog_t *zilog;
	uint64_t inoff, outoff, len, done;
	uint64_t outsize, size;
	int error;
	int count = 0;
	sa_bulk_attr_t bulk[3];
	uint64_t mtime[2], ctime[2];
	uint64_t uid, gid, projid;
	blkptr_t *bps;
	size_t maxblocks, nbps;
	uint_t inblksz;
	uint64_t clear_setid_bits_txg = 0;

	inoff = *inoffp;
	outoff = *outoffp;
	len = *lenp;
	done = 0;

	inzfsvfs = ZTOZSB(inzp);
	outzfsvfs = ZTOZSB(outzp);

	/*
	 * We need to call zfs_enter() potentially on two different datasets,
	 * so we need a dedicated function for that.
	 */
	error = zfs_enter_two(inzfsvfs, outzfsvfs, FTAG);
	if (error != 0)
		return (error);

	inos = inzfsvfs->z_os;
	outos = outzfsvfs->z_os;

	/*
	 * Both source and destination have to belong to the same storage pool.
	 */
	if (dmu_objset_spa(inos) != dmu_objset_spa(outos)) {
		zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
		return (SET_ERROR(EXDEV));
	}

	/*
	 * outos and inos belong to the same storage pool, as verified a few
	 * lines above, so only one check is needed here.
	 */
	if (!spa_feature_is_enabled(dmu_objset_spa(outos),
	    SPA_FEATURE_BLOCK_CLONING)) {
		zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
		return (SET_ERROR(EOPNOTSUPP));
	}

	ASSERT(!outzfsvfs->z_replay);

	/*
	 * Block cloning from an unencrypted dataset into an encrypted
	 * dataset and vice versa is not supported.
	 */
	if (inos->os_encrypted != outos->os_encrypted) {
		zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
		return (SET_ERROR(EXDEV));
	}

	/*
	 * Cloning across encrypted datasets is possible only if they
	 * share the same master key.
	 */
	if (inos != outos && inos->os_encrypted &&
	    !dmu_objset_crypto_key_equal(inos, outos)) {
		zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
		return (SET_ERROR(EXDEV));
	}

	error = zfs_verify_zp(inzp);
	if (error == 0)
		error = zfs_verify_zp(outzp);
	if (error != 0) {
		zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
		return (error);
	}

	/*
	 * We don't copy the source file's flags, which is why we don't allow
	 * cloning of files that are in quarantine.
	 */
	if (inzp->z_pflags & ZFS_AV_QUARANTINED) {
		zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
		return (SET_ERROR(EACCES));
	}

	if (inoff >= inzp->z_size) {
		*lenp = 0;
		zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
		return (0);
	}
	if (len > inzp->z_size - inoff) {
		len = inzp->z_size - inoff;
	}
	if (len == 0) {
		*lenp = 0;
		zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
		return (0);
	}

	/*
	 * Callers might not be able to detect properly that we are read-only,
	 * so check it explicitly here.
	 */
	if (zfs_is_readonly(outzfsvfs)) {
		zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
		return (SET_ERROR(EROFS));
	}

	/*
	 * If immutable or not appending then return EPERM.
	 * Intentionally allow ZFS_READONLY through here.
	 * See zfs_zaccess_common()
	 */
	if ((outzp->z_pflags & ZFS_IMMUTABLE) != 0) {
		zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
		return (SET_ERROR(EPERM));
	}

	/*
	 * No overlapping if we are cloning within the same file.
	 */
	if (inzp == outzp) {
		if (inoff < outoff + len && outoff < inoff + len) {
			zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);
			return (SET_ERROR(EINVAL));
		}
	}

	/*
	 * Maintain predictable lock order.
	 */
	if (inzp < outzp || (inzp == outzp && inoff < outoff)) {
		inlr = zfs_rangelock_enter(&inzp->z_rangelock, inoff, len,
		    RL_READER);
		outlr = zfs_rangelock_enter(&outzp->z_rangelock, outoff, len,
		    RL_WRITER);
	} else {
		outlr = zfs_rangelock_enter(&outzp->z_rangelock, outoff, len,
		    RL_WRITER);
		inlr = zfs_rangelock_enter(&inzp->z_rangelock, inoff, len,
		    RL_READER);
	}

	inblksz = inzp->z_blksz;

	/*
	 * We cannot clone into files with different block size if we can't
	 * grow it (block size is already bigger or more than one block).
	 */
	if (inblksz != outzp->z_blksz && (outzp->z_size > outzp->z_blksz ||
	    outzp->z_size > inblksz)) {
		error = SET_ERROR(EINVAL);
		goto unlock;
	}

	/*
	 * Block size must be power-of-2 if destination offset != 0.
	 * There can be no multiple blocks of non-power-of-2 size.
	 */
	if (outoff != 0 && !ISP2(inblksz)) {
		error = SET_ERROR(EINVAL);
		goto unlock;
	}

	/*
	 * Offsets and len must be at block boundaries.
	 */
	if ((inoff % inblksz) != 0 || (outoff % inblksz) != 0) {
		error = SET_ERROR(EINVAL);
		goto unlock;
	}
	/*
	 * Length must be a multiple of blksz, except for the end of the file.
	 */
	if ((len % inblksz) != 0 &&
	    (len < inzp->z_size - inoff || len < outzp->z_size - outoff)) {
		error = SET_ERROR(EINVAL);
		goto unlock;
	}

	/*
	 * If we are copying only one block and it is smaller than the
	 * recordsize property, do not allow the destination to grow beyond
	 * one block if it is not there yet. Otherwise the destination will
	 * get stuck with that block size forever, which can be as small as
	 * 512 bytes, no matter how big the destination grows later.
	 */
	if (len <= inblksz && inblksz < outzfsvfs->z_max_blksz &&
	    outzp->z_size <= inblksz && outoff + len > inblksz) {
		error = SET_ERROR(EINVAL);
		goto unlock;
	}

	error = zn_rlimit_fsize(outoff + len);
	if (error != 0) {
		goto unlock;
	}

	if (inoff >= MAXOFFSET_T || outoff >= MAXOFFSET_T) {
		error = SET_ERROR(EFBIG);
		goto unlock;
	}

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(outzfsvfs), NULL,
	    &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(outzfsvfs), NULL,
	    &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(outzfsvfs), NULL,
	    &outzp->z_size, 8);

	zilog = outzfsvfs->z_log;
	maxblocks = zil_max_log_data(zilog, sizeof (lr_clone_range_t)) /
	    sizeof (bps[0]);

	uid = KUID_TO_SUID(ZTOUID(outzp));
	gid = KGID_TO_SGID(ZTOGID(outzp));
	projid = outzp->z_projid;

	bps = vmem_alloc(sizeof (bps[0]) * maxblocks, KM_SLEEP);

	/*
	 * Clone the file in reasonable size chunks. Each chunk is cloned
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (len > 0) {
		size = MIN(inblksz * maxblocks, len);

		if (zfs_id_overblockquota(outzfsvfs, DMU_USERUSED_OBJECT,
		    uid) ||
		    zfs_id_overblockquota(outzfsvfs, DMU_GROUPUSED_OBJECT,
		    gid) ||
		    (projid != ZFS_DEFAULT_PROJID &&
		    zfs_id_overblockquota(outzfsvfs, DMU_PROJECTUSED_OBJECT,
		    projid))) {
			error = SET_ERROR(EDQUOT);
			break;
		}

		nbps = maxblocks;
		error = dmu_read_l0_bps(inos, inzp->z_id, inoff, size, bps,
		    &nbps);
		if (error != 0) {
			/*
			 * If we are trying to clone a block that was created
			 * in the current transaction group, error will be
			 * EAGAIN here, which we can just return to the caller
			 * so it can fallback if it likes.
			 */
			break;
		}

		/*
		 * Start a transaction.
		 */
		tx = dmu_tx_create(outos);
		dmu_tx_hold_sa(tx, outzp->z_sa_hdl, B_FALSE);
		db = (dmu_buf_impl_t *)sa_get_db(outzp->z_sa_hdl);
		DB_DNODE_ENTER(db);
		dmu_tx_hold_clone_by_dnode(tx, DB_DNODE(db), outoff, size);
		DB_DNODE_EXIT(db);
		zfs_sa_upgrade_txholds(tx, outzp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
			break;
		}

		/*
		 * Copy source znode's block size. This only happens on the
		 * first iteration since zfs_rangelock_reduce() will shrink
		 * down lr_len to the appropriate size.
		 */
		if (outlr->lr_length == UINT64_MAX) {
			zfs_grow_blocksize(outzp, inblksz, tx);
			/*
			 * Round range lock up to the block boundary, so we
			 * prevent appends until we are done.
			 */
			zfs_rangelock_reduce(outlr, outoff,
			    ((len - 1) / inblksz + 1) * inblksz);
		}

		error = dmu_brt_clone(outos, outzp->z_id, outoff, size, tx,
		    bps, nbps);
		if (error != 0) {
			dmu_tx_commit(tx);
			break;
		}

		zfs_clear_setid_bits_if_necessary(outzfsvfs, outzp, cr,
		    &clear_setid_bits_txg, tx);

		zfs_tstamp_update_setup(outzp, CONTENT_MODIFIED, mtime, ctime);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((outsize = outzp->z_size) < outoff + size) {
			(void) atomic_cas_64(&outzp->z_size, outsize,
			    outoff + size);
		}

		error = sa_bulk_update(outzp->z_sa_hdl, bulk, count, tx);

		zfs_log_clone_range(zilog, tx, TX_CLONE_RANGE, outzp, outoff,
		    size, inblksz, bps, nbps);

		dmu_tx_commit(tx);

		if (error != 0)
			break;

		inoff += size;
		outoff += size;
		len -= size;
		done += size;
	}

	vmem_free(bps, sizeof (bps[0]) * maxblocks);
	zfs_znode_update_vfs(outzp);

unlock:
	zfs_rangelock_exit(outlr);
	zfs_rangelock_exit(inlr);

	if (done > 0) {
		/*
		 * If we have made at least partial progress, reset the error.
		 */
		error = 0;

		ZFS_ACCESSTIME_STAMP(inzfsvfs, inzp);

		if (outos->os_sync == ZFS_SYNC_ALWAYS) {
			zil_commit(zilog, outzp->z_id);
		}

		*inoffp += done;
		*outoffp += done;
		*lenp = done;
	} else {
		/*
		 * If we made no progress, there must be a good reason.
		 * EOF is handled explicitly above, before the loop.
		 */
		ASSERT3S(error, !=, 0);
	}

	zfs_exit_two(inzfsvfs, outzfsvfs, FTAG);

	return (error);
}
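
/*
 * Worked example for the chunking above (illustrative): with a ZIL
 * entry able to carry 130816 bytes of clone payload and
 * sizeof (blkptr_t) == 128, maxblocks is 130816 / 128 = 1022 block
 * pointers.  With a 128 KiB source block size, one transaction can
 * therefore clone up to 1022 * 128 KiB, a bit under 128 MiB.
 */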
/*
 * The usual pattern would be to call zfs_clone_range() from
 * zfs_replay_clone(), but we cannot do that, because when replaying we
 * don't have the source znode available. This is why we need a dedicated
 * replay function.
 */
int
zfs_clone_range_replay(znode_t *zp, uint64_t off, uint64_t len, uint64_t blksz,
    const blkptr_t *bps, size_t nbps)
{
	zfsvfs_t *zfsvfs;
	dmu_buf_impl_t *db;
	dmu_tx_t *tx;
	int error;
	int count = 0;
	sa_bulk_attr_t bulk[3];
	uint64_t mtime[2], ctime[2];

	ASSERT3U(off, <, MAXOFFSET_T);
	ASSERT3U(len, >, 0);
	ASSERT3U(nbps, >, 0);

	zfsvfs = ZTOZSB(zp);

	ASSERT(spa_feature_is_enabled(dmu_objset_spa(zfsvfs->z_os),
	    SPA_FEATURE_BLOCK_CLONING));

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	ASSERT(zfsvfs->z_replay);
	ASSERT(!zfs_is_readonly(zfsvfs));

	if ((off % blksz) != 0) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EINVAL));
	}

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);

	/*
	 * Start a transaction.
	 */
	tx = dmu_tx_create(zfsvfs->z_os);

	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
	DB_DNODE_ENTER(db);
	dmu_tx_hold_clone_by_dnode(tx, DB_DNODE(db), off, len);
	DB_DNODE_EXIT(db);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	if (zp->z_blksz < blksz)
		zfs_grow_blocksize(zp, blksz, tx);

	dmu_brt_clone(zfsvfs->z_os, zp->z_id, off, len, tx, bps, nbps);

	zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

	if (zp->z_size < off + len)
		zp->z_size = off + len;

	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

	/*
	 * zil_replaying() not only checks if we are replaying ZIL, but also
	 * updates the ZIL header to record replay progress.
	 */
	VERIFY(zil_replaying(zfsvfs->z_log, tx));

	dmu_tx_commit(tx);

	zfs_znode_update_vfs(zp);

	zfs_exit(zfsvfs, FTAG);

	return (error);
}
EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_holey);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);
EXPORT_SYMBOL(zfs_clone_range);
EXPORT_SYMBOL(zfs_clone_range_replay);

ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, U64, ZMOD_RW,
	"Bytes to read per chunk");