4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2015, 2018 by Delphix. All rights reserved.
27 #include <sys/types.h>
28 #include <sys/param.h>
29 #include <sys/sysmacros.h>
30 #include <sys/cmn_err.h>
32 #include <sys/thread.h>
35 #include <sys/zfs_znode.h>
36 #include <sys/zfs_dir.h>
38 #include <sys/zil_impl.h>
39 #include <sys/byteorder.h>
40 #include <sys/policy.h>
46 #include <sys/zfs_fuid.h>
47 #include <sys/dsl_dataset.h>
/*
 * These zfs_log_* functions must be called within a dmu tx, in one
 * of 2 contexts depending on zilog->z_replay:
 *
 * Non replay mode
 * ---------------
 * We need to record the transaction so that if it is committed to
 * the Intent Log then it can be replayed. An intent log transaction
 * structure (itx_t) is allocated and all the information necessary to
 * possibly replay the transaction is saved in it. The itx is then assigned
 * a sequence number and inserted in the in-memory list anchored in the zilog.
 *
 * Replay mode
 * -----------
 * We need to mark the intent log record as replayed in the log header.
 * This is done in the same transaction as the replay so that they
 * commit atomically.
 */
69 zfs_log_create_txtype(zil_create_t type
, vsecattr_t
*vsecp
, vattr_t
*vap
)
71 int isxvattr
= (vap
->va_mask
& ATTR_XVATTR
);
74 if (vsecp
== NULL
&& !isxvattr
)
76 if (vsecp
&& isxvattr
)
77 return (TX_CREATE_ACL_ATTR
);
79 return (TX_CREATE_ACL
);
81 return (TX_CREATE_ATTR
);
83 if (vsecp
== NULL
&& !isxvattr
)
85 if (vsecp
&& isxvattr
)
86 return (TX_MKDIR_ACL_ATTR
);
88 return (TX_MKDIR_ACL
);
90 return (TX_MKDIR_ATTR
);
99 * build up the log data necessary for logging xvattr_t
100 * First lr_attr_t is initialized. following the lr_attr_t
101 * is the mapsize and attribute bitmap copied from the xvattr_t.
102 * Following the bitmap and bitmapsize two 64 bit words are reserved
103 * for the create time which may be set. Following the create time
104 * records a single 64 bit integer which has the bits to set on
105 * replay for the xvattr.
108 zfs_log_xvattr(lr_attr_t
*lrattr
, xvattr_t
*xvap
)
112 xoap
= xva_getxoptattr(xvap
);
115 lrattr
->lr_attr_masksize
= xvap
->xva_mapsize
;
116 uint32_t *bitmap
= &lrattr
->lr_attr_bitmap
;
117 for (int i
= 0; i
!= xvap
->xva_mapsize
; i
++, bitmap
++)
118 *bitmap
= xvap
->xva_reqattrmap
[i
];
120 lr_attr_end_t
*end
= (lr_attr_end_t
*)bitmap
;
121 end
->lr_attr_attrs
= 0;
122 end
->lr_attr_crtime
[0] = 0;
123 end
->lr_attr_crtime
[1] = 0;
124 memset(end
->lr_attr_scanstamp
, 0, AV_SCANSTAMP_SZ
);
126 if (XVA_ISSET_REQ(xvap
, XAT_READONLY
))
127 end
->lr_attr_attrs
|= (xoap
->xoa_readonly
== 0) ? 0 :
129 if (XVA_ISSET_REQ(xvap
, XAT_HIDDEN
))
130 end
->lr_attr_attrs
|= (xoap
->xoa_hidden
== 0) ? 0 :
132 if (XVA_ISSET_REQ(xvap
, XAT_SYSTEM
))
133 end
->lr_attr_attrs
|= (xoap
->xoa_system
== 0) ? 0 :
135 if (XVA_ISSET_REQ(xvap
, XAT_ARCHIVE
))
136 end
->lr_attr_attrs
|= (xoap
->xoa_archive
== 0) ? 0 :
138 if (XVA_ISSET_REQ(xvap
, XAT_IMMUTABLE
))
139 end
->lr_attr_attrs
|= (xoap
->xoa_immutable
== 0) ? 0 :
141 if (XVA_ISSET_REQ(xvap
, XAT_NOUNLINK
))
142 end
->lr_attr_attrs
|= (xoap
->xoa_nounlink
== 0) ? 0 :
144 if (XVA_ISSET_REQ(xvap
, XAT_APPENDONLY
))
145 end
->lr_attr_attrs
|= (xoap
->xoa_appendonly
== 0) ? 0 :
147 if (XVA_ISSET_REQ(xvap
, XAT_OPAQUE
))
148 end
->lr_attr_attrs
|= (xoap
->xoa_opaque
== 0) ? 0 :
150 if (XVA_ISSET_REQ(xvap
, XAT_NODUMP
))
151 end
->lr_attr_attrs
|= (xoap
->xoa_nodump
== 0) ? 0 :
153 if (XVA_ISSET_REQ(xvap
, XAT_AV_QUARANTINED
))
154 end
->lr_attr_attrs
|= (xoap
->xoa_av_quarantined
== 0) ? 0 :
156 if (XVA_ISSET_REQ(xvap
, XAT_AV_MODIFIED
))
157 end
->lr_attr_attrs
|= (xoap
->xoa_av_modified
== 0) ? 0 :
159 if (XVA_ISSET_REQ(xvap
, XAT_CREATETIME
))
160 ZFS_TIME_ENCODE(&xoap
->xoa_createtime
, end
->lr_attr_crtime
);
161 if (XVA_ISSET_REQ(xvap
, XAT_AV_SCANSTAMP
)) {
162 ASSERT(!XVA_ISSET_REQ(xvap
, XAT_PROJID
));
164 memcpy(end
->lr_attr_scanstamp
, xoap
->xoa_av_scanstamp
,
166 } else if (XVA_ISSET_REQ(xvap
, XAT_PROJID
)) {
168 * XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
169 * at the same time, so we can share the same space.
171 memcpy(end
->lr_attr_scanstamp
, &xoap
->xoa_projid
,
174 if (XVA_ISSET_REQ(xvap
, XAT_REPARSE
))
175 end
->lr_attr_attrs
|= (xoap
->xoa_reparse
== 0) ? 0 :
177 if (XVA_ISSET_REQ(xvap
, XAT_OFFLINE
))
178 end
->lr_attr_attrs
|= (xoap
->xoa_offline
== 0) ? 0 :
180 if (XVA_ISSET_REQ(xvap
, XAT_SPARSE
))
181 end
->lr_attr_attrs
|= (xoap
->xoa_sparse
== 0) ? 0 :
183 if (XVA_ISSET_REQ(xvap
, XAT_PROJINHERIT
))
184 end
->lr_attr_attrs
|= (xoap
->xoa_projinherit
== 0) ? 0 :
189 zfs_log_fuid_ids(zfs_fuid_info_t
*fuidp
, void *start
)
192 uint64_t *fuidloc
= start
;
194 /* First copy in the ACE FUIDs */
195 for (zfuid
= list_head(&fuidp
->z_fuids
); zfuid
;
196 zfuid
= list_next(&fuidp
->z_fuids
, zfuid
)) {
197 *fuidloc
++ = zfuid
->z_logfuid
;
204 zfs_log_fuid_domains(zfs_fuid_info_t
*fuidp
, void *start
)
206 zfs_fuid_domain_t
*zdomain
;
208 /* now copy in the domain info, if any */
209 if (fuidp
->z_domain_str_sz
!= 0) {
210 for (zdomain
= list_head(&fuidp
->z_domains
); zdomain
;
211 zdomain
= list_next(&fuidp
->z_domains
, zdomain
)) {
212 memcpy(start
, zdomain
->z_domain
,
213 strlen(zdomain
->z_domain
) + 1);
214 start
= (caddr_t
)start
+
215 strlen(zdomain
->z_domain
) + 1;
222 * If zp is an xattr node, check whether the xattr owner is unlinked.
223 * We don't want to log anything if the owner is unlinked.
226 zfs_xattr_owner_unlinked(znode_t
*zp
)
234 * zrele drops the vnode lock which violates the VOP locking contract
235 * on FreeBSD. See comment at the top of zfs_replay.c for more detail.
238 * if zp is XATTR node, keep walking up via z_xattr_parent until we
241 while (tzp
->z_pflags
& ZFS_XATTR
) {
242 ASSERT3U(zp
->z_xattr_parent
, !=, 0);
243 if (zfs_zget(ZTOZSB(tzp
), tzp
->z_xattr_parent
, &dzp
) != 0) {
251 unlinked
= tzp
->z_unlinked
;
258 * if zp is XATTR node, keep walking up via z_xattr_parent until we
261 while (zp
->z_pflags
& ZFS_XATTR
) {
262 ASSERT3U(zp
->z_xattr_parent
, !=, 0);
263 if (zfs_zget(ZTOZSB(zp
), zp
->z_xattr_parent
, &dzp
) != 0) {
270 unlinked
= zp
->z_unlinked
;
278 * Handles TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, TX_MKDIR_ATTR and
279 * TK_MKXATTR transactions.
281 * TX_CREATE and TX_MKDIR are standard creates, but they may have FUID
282 * domain information appended prior to the name. In this case the
283 * uid/gid in the log record will be a log centric FUID.
285 * TX_CREATE_ACL_ATTR and TX_MKDIR_ACL_ATTR handle special creates that
286 * may contain attributes, ACL and optional fuid information.
288 * TX_CREATE_ACL and TX_MKDIR_ACL handle special creates that specify
289 * and ACL and normal users/groups in the ACEs.
291 * There may be an optional xvattr attribute information similar
292 * to zfs_log_setattr.
294 * Also, after the file name "domain" strings may be appended.
297 zfs_log_create(zilog_t
*zilog
, dmu_tx_t
*tx
, uint64_t txtype
,
298 znode_t
*dzp
, znode_t
*zp
, const char *name
, vsecattr_t
*vsecp
,
299 zfs_fuid_info_t
*fuidp
, vattr_t
*vap
)
303 lr_acl_create_t
*lracl
;
307 xvattr_t
*xvap
= (xvattr_t
*)vap
;
310 size_t namesize
= strlen(name
) + 1;
313 if (zil_replaying(zilog
, tx
) || zfs_xattr_owner_unlinked(dzp
))
317 * If we have FUIDs present then add in space for
318 * domains and ACE fuid's if any.
321 fuidsz
+= fuidp
->z_domain_str_sz
;
322 fuidsz
+= fuidp
->z_fuid_cnt
* sizeof (uint64_t);
325 if (vap
->va_mask
& ATTR_XVATTR
)
326 xvatsize
= ZIL_XVAT_SIZE(xvap
->xva_mapsize
);
328 if ((int)txtype
== TX_CREATE_ATTR
|| (int)txtype
== TX_MKDIR_ATTR
||
329 (int)txtype
== TX_CREATE
|| (int)txtype
== TX_MKDIR
||
330 (int)txtype
== TX_MKXATTR
) {
331 txsize
= sizeof (*lr
) + namesize
+ fuidsz
+ xvatsize
;
332 lrsize
= sizeof (*lr
);
335 sizeof (lr_acl_create_t
) + namesize
+ fuidsz
+
336 ZIL_ACE_LENGTH(aclsize
) + xvatsize
;
337 lrsize
= sizeof (lr_acl_create_t
);
340 itx
= zil_itx_create(txtype
, txsize
);
342 lr
= (lr_create_t
*)&itx
->itx_lr
;
343 lr
->lr_doid
= dzp
->z_id
;
344 lr
->lr_foid
= zp
->z_id
;
345 /* Store dnode slot count in 8 bits above object id. */
346 LR_FOID_SET_SLOTS(lr
->lr_foid
, zp
->z_dnodesize
>> DNODE_SHIFT
);
347 lr
->lr_mode
= zp
->z_mode
;
348 if (!IS_EPHEMERAL(KUID_TO_SUID(ZTOUID(zp
)))) {
349 lr
->lr_uid
= (uint64_t)KUID_TO_SUID(ZTOUID(zp
));
351 lr
->lr_uid
= fuidp
->z_fuid_owner
;
353 if (!IS_EPHEMERAL(KGID_TO_SGID(ZTOGID(zp
)))) {
354 lr
->lr_gid
= (uint64_t)KGID_TO_SGID(ZTOGID(zp
));
356 lr
->lr_gid
= fuidp
->z_fuid_group
;
358 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_GEN(ZTOZSB(zp
)), &lr
->lr_gen
,
360 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_CRTIME(ZTOZSB(zp
)),
361 lr
->lr_crtime
, sizeof (uint64_t) * 2);
363 if (sa_lookup(zp
->z_sa_hdl
, SA_ZPL_RDEV(ZTOZSB(zp
)), &lr
->lr_rdev
,
364 sizeof (lr
->lr_rdev
)) != 0)
368 * Fill in xvattr info if any
370 if (vap
->va_mask
& ATTR_XVATTR
) {
371 zfs_log_xvattr((lr_attr_t
*)((caddr_t
)lr
+ lrsize
), xvap
);
372 end
= (caddr_t
)lr
+ lrsize
+ xvatsize
;
374 end
= (caddr_t
)lr
+ lrsize
;
377 /* Now fill in any ACL info */
380 lracl
= (lr_acl_create_t
*)&itx
->itx_lr
;
381 lracl
->lr_aclcnt
= vsecp
->vsa_aclcnt
;
382 lracl
->lr_acl_bytes
= aclsize
;
383 lracl
->lr_domcnt
= fuidp
? fuidp
->z_domain_cnt
: 0;
384 lracl
->lr_fuidcnt
= fuidp
? fuidp
->z_fuid_cnt
: 0;
385 if (vsecp
->vsa_aclflags
& VSA_ACE_ACLFLAGS
)
386 lracl
->lr_acl_flags
= (uint64_t)vsecp
->vsa_aclflags
;
388 lracl
->lr_acl_flags
= 0;
390 memcpy(end
, vsecp
->vsa_aclentp
, aclsize
);
391 end
= (caddr_t
)end
+ ZIL_ACE_LENGTH(aclsize
);
394 /* drop in FUID info */
396 end
= zfs_log_fuid_ids(fuidp
, end
);
397 end
= zfs_log_fuid_domains(fuidp
, end
);
400 * Now place file name in log record
402 memcpy(end
, name
, namesize
);
404 zil_itx_assign(zilog
, itx
, tx
);
408 * Handles both TX_REMOVE and TX_RMDIR transactions.
411 zfs_log_remove(zilog_t
*zilog
, dmu_tx_t
*tx
, uint64_t txtype
,
412 znode_t
*dzp
, const char *name
, uint64_t foid
, boolean_t unlinked
)
416 size_t namesize
= strlen(name
) + 1;
418 if (zil_replaying(zilog
, tx
) || zfs_xattr_owner_unlinked(dzp
))
421 itx
= zil_itx_create(txtype
, sizeof (*lr
) + namesize
);
422 lr
= (lr_remove_t
*)&itx
->itx_lr
;
423 lr
->lr_doid
= dzp
->z_id
;
424 memcpy(lr
+ 1, name
, namesize
);
429 * Object ids can be re-instantiated in the next txg so
430 * remove any async transactions to avoid future leaks.
431 * This can happen if a fsync occurs on the re-instantiated
432 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
433 * the new file data and flushes a write record for the old object.
436 ASSERT((txtype
& ~TX_CI
) == TX_REMOVE
);
437 zil_remove_async(zilog
, foid
);
439 zil_itx_assign(zilog
, itx
, tx
);
443 * Handles TX_LINK transactions.
446 zfs_log_link(zilog_t
*zilog
, dmu_tx_t
*tx
, uint64_t txtype
,
447 znode_t
*dzp
, znode_t
*zp
, const char *name
)
451 size_t namesize
= strlen(name
) + 1;
453 if (zil_replaying(zilog
, tx
))
456 itx
= zil_itx_create(txtype
, sizeof (*lr
) + namesize
);
457 lr
= (lr_link_t
*)&itx
->itx_lr
;
458 lr
->lr_doid
= dzp
->z_id
;
459 lr
->lr_link_obj
= zp
->z_id
;
460 memcpy(lr
+ 1, name
, namesize
);
462 zil_itx_assign(zilog
, itx
, tx
);
466 * Handles TX_SYMLINK transactions.
469 zfs_log_symlink(zilog_t
*zilog
, dmu_tx_t
*tx
, uint64_t txtype
,
470 znode_t
*dzp
, znode_t
*zp
, const char *name
, const char *link
)
474 size_t namesize
= strlen(name
) + 1;
475 size_t linksize
= strlen(link
) + 1;
477 if (zil_replaying(zilog
, tx
))
480 itx
= zil_itx_create(txtype
, sizeof (*lr
) + namesize
+ linksize
);
481 lr
= (lr_create_t
*)&itx
->itx_lr
;
482 lr
->lr_doid
= dzp
->z_id
;
483 lr
->lr_foid
= zp
->z_id
;
484 lr
->lr_uid
= KUID_TO_SUID(ZTOUID(zp
));
485 lr
->lr_gid
= KGID_TO_SGID(ZTOGID(zp
));
486 lr
->lr_mode
= zp
->z_mode
;
487 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_GEN(ZTOZSB(zp
)), &lr
->lr_gen
,
489 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_CRTIME(ZTOZSB(zp
)),
490 lr
->lr_crtime
, sizeof (uint64_t) * 2);
491 memcpy((char *)(lr
+ 1), name
, namesize
);
492 memcpy((char *)(lr
+ 1) + namesize
, link
, linksize
);
494 zil_itx_assign(zilog
, itx
, tx
);
498 * Handles TX_RENAME transactions.
501 zfs_log_rename(zilog_t
*zilog
, dmu_tx_t
*tx
, uint64_t txtype
, znode_t
*sdzp
,
502 const char *sname
, znode_t
*tdzp
, const char *dname
, znode_t
*szp
)
506 size_t snamesize
= strlen(sname
) + 1;
507 size_t dnamesize
= strlen(dname
) + 1;
509 if (zil_replaying(zilog
, tx
))
512 itx
= zil_itx_create(txtype
, sizeof (*lr
) + snamesize
+ dnamesize
);
513 lr
= (lr_rename_t
*)&itx
->itx_lr
;
514 lr
->lr_sdoid
= sdzp
->z_id
;
515 lr
->lr_tdoid
= tdzp
->z_id
;
516 memcpy((char *)(lr
+ 1), sname
, snamesize
);
517 memcpy((char *)(lr
+ 1) + snamesize
, dname
, dnamesize
);
518 itx
->itx_oid
= szp
->z_id
;
520 zil_itx_assign(zilog
, itx
, tx
);
/*
 * zfs_log_write() handles TX_WRITE transactions. The specified callback is
 * called as soon as the write is on stable storage (be it via a DMU sync or a
 * ZIL commit).
 */
/* Writes >= this size go WR_INDIRECT when no separate log device exists. */
static long zfs_immediate_write_sz = 32768;
531 zfs_log_write(zilog_t
*zilog
, dmu_tx_t
*tx
, int txtype
,
532 znode_t
*zp
, offset_t off
, ssize_t resid
, int ioflag
,
533 zil_callback_t callback
, void *callback_data
)
535 dmu_buf_impl_t
*db
= (dmu_buf_impl_t
*)sa_get_db(zp
->z_sa_hdl
);
536 uint32_t blocksize
= zp
->z_blksz
;
537 itx_wr_state_t write_state
;
540 ssize_t size
= resid
;
542 if (zil_replaying(zilog
, tx
) || zp
->z_unlinked
||
543 zfs_xattr_owner_unlinked(zp
)) {
544 if (callback
!= NULL
)
545 callback(callback_data
);
549 if (zilog
->zl_logbias
== ZFS_LOGBIAS_THROUGHPUT
)
550 write_state
= WR_INDIRECT
;
551 else if (!spa_has_slogs(zilog
->zl_spa
) &&
552 resid
>= zfs_immediate_write_sz
)
553 write_state
= WR_INDIRECT
;
554 else if (ioflag
& (O_SYNC
| O_DSYNC
))
555 write_state
= WR_COPIED
;
557 write_state
= WR_NEED_COPY
;
559 if ((fsync_cnt
= (uintptr_t)tsd_get(zfs_fsyncer_key
)) != 0) {
560 (void) tsd_set(zfs_fsyncer_key
, (void *)(fsync_cnt
- 1));
563 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_GEN(ZTOZSB(zp
)), &gen
,
569 itx_wr_state_t wr_state
= write_state
;
573 * A WR_COPIED record must fit entirely in one log block.
574 * Large writes can use WR_NEED_COPY, which the ZIL will
575 * split into multiple records across several log blocks
578 if (wr_state
== WR_COPIED
&&
579 resid
> zil_max_copied_data(zilog
))
580 wr_state
= WR_NEED_COPY
;
581 else if (wr_state
== WR_INDIRECT
)
582 len
= MIN(blocksize
- P2PHASE(off
, blocksize
), resid
);
584 itx
= zil_itx_create(txtype
, sizeof (*lr
) +
585 (wr_state
== WR_COPIED
? len
: 0));
586 lr
= (lr_write_t
*)&itx
->itx_lr
;
589 * For WR_COPIED records, copy the data into the lr_write_t.
591 if (wr_state
== WR_COPIED
) {
594 err
= dmu_read_by_dnode(DB_DNODE(db
), off
, len
, lr
+ 1,
595 DMU_READ_NO_PREFETCH
);
597 zil_itx_destroy(itx
);
598 itx
= zil_itx_create(txtype
, sizeof (*lr
));
599 lr
= (lr_write_t
*)&itx
->itx_lr
;
600 wr_state
= WR_NEED_COPY
;
605 itx
->itx_wr_state
= wr_state
;
606 lr
->lr_foid
= zp
->z_id
;
610 BP_ZERO(&lr
->lr_blkptr
);
612 itx
->itx_private
= ZTOZSB(zp
);
615 if (!(ioflag
& (O_SYNC
| O_DSYNC
)) && (zp
->z_sync_cnt
== 0) &&
617 itx
->itx_sync
= B_FALSE
;
619 itx
->itx_callback
= callback
;
620 itx
->itx_callback_data
= callback_data
;
621 zil_itx_assign(zilog
, itx
, tx
);
627 if (write_state
== WR_COPIED
|| write_state
== WR_NEED_COPY
) {
628 dsl_pool_wrlog_count(zilog
->zl_dmu_pool
, size
, tx
->tx_txg
);
633 * Handles TX_TRUNCATE transactions.
636 zfs_log_truncate(zilog_t
*zilog
, dmu_tx_t
*tx
, int txtype
,
637 znode_t
*zp
, uint64_t off
, uint64_t len
)
642 if (zil_replaying(zilog
, tx
) || zp
->z_unlinked
||
643 zfs_xattr_owner_unlinked(zp
))
646 itx
= zil_itx_create(txtype
, sizeof (*lr
));
647 lr
= (lr_truncate_t
*)&itx
->itx_lr
;
648 lr
->lr_foid
= zp
->z_id
;
652 itx
->itx_sync
= (zp
->z_sync_cnt
!= 0);
653 zil_itx_assign(zilog
, itx
, tx
);
657 * Handles TX_SETATTR transactions.
660 zfs_log_setattr(zilog_t
*zilog
, dmu_tx_t
*tx
, int txtype
,
661 znode_t
*zp
, vattr_t
*vap
, uint_t mask_applied
, zfs_fuid_info_t
*fuidp
)
665 xvattr_t
*xvap
= (xvattr_t
*)vap
;
666 size_t recsize
= sizeof (lr_setattr_t
);
669 if (zil_replaying(zilog
, tx
) || zp
->z_unlinked
)
673 * If XVATTR set, then log record size needs to allow
674 * for lr_attr_t + xvattr mask, mapsize and create time
675 * plus actual attribute values
677 if (vap
->va_mask
& ATTR_XVATTR
)
678 recsize
= sizeof (*lr
) + ZIL_XVAT_SIZE(xvap
->xva_mapsize
);
681 recsize
+= fuidp
->z_domain_str_sz
;
683 itx
= zil_itx_create(txtype
, recsize
);
684 lr
= (lr_setattr_t
*)&itx
->itx_lr
;
685 lr
->lr_foid
= zp
->z_id
;
686 lr
->lr_mask
= (uint64_t)mask_applied
;
687 lr
->lr_mode
= (uint64_t)vap
->va_mode
;
688 if ((mask_applied
& ATTR_UID
) && IS_EPHEMERAL(vap
->va_uid
))
689 lr
->lr_uid
= fuidp
->z_fuid_owner
;
691 lr
->lr_uid
= (uint64_t)vap
->va_uid
;
693 if ((mask_applied
& ATTR_GID
) && IS_EPHEMERAL(vap
->va_gid
))
694 lr
->lr_gid
= fuidp
->z_fuid_group
;
696 lr
->lr_gid
= (uint64_t)vap
->va_gid
;
698 lr
->lr_size
= (uint64_t)vap
->va_size
;
699 ZFS_TIME_ENCODE(&vap
->va_atime
, lr
->lr_atime
);
700 ZFS_TIME_ENCODE(&vap
->va_mtime
, lr
->lr_mtime
);
701 start
= (lr_setattr_t
*)(lr
+ 1);
702 if (vap
->va_mask
& ATTR_XVATTR
) {
703 zfs_log_xvattr((lr_attr_t
*)start
, xvap
);
704 start
= (caddr_t
)start
+ ZIL_XVAT_SIZE(xvap
->xva_mapsize
);
708 * Now stick on domain information if any on end
712 (void) zfs_log_fuid_domains(fuidp
, start
);
714 itx
->itx_sync
= (zp
->z_sync_cnt
!= 0);
715 zil_itx_assign(zilog
, itx
, tx
);
719 * Handles TX_SETSAXATTR transactions.
722 zfs_log_setsaxattr(zilog_t
*zilog
, dmu_tx_t
*tx
, int txtype
,
723 znode_t
*zp
, const char *name
, const void *value
, size_t size
)
727 size_t recsize
= sizeof (lr_setsaxattr_t
);
731 if (zil_replaying(zilog
, tx
) || zp
->z_unlinked
)
734 namelen
= strlen(name
) + 1;
735 recsize
+= (namelen
+ size
);
736 itx
= zil_itx_create(txtype
, recsize
);
737 lr
= (lr_setsaxattr_t
*)&itx
->itx_lr
;
738 lr
->lr_foid
= zp
->z_id
;
739 xattrstart
= (char *)(lr
+ 1);
740 memcpy(xattrstart
, name
, namelen
);
742 memcpy((char *)xattrstart
+ namelen
, value
, size
);
748 itx
->itx_sync
= (zp
->z_sync_cnt
!= 0);
749 zil_itx_assign(zilog
, itx
, tx
);
753 * Handles TX_ACL transactions.
756 zfs_log_acl(zilog_t
*zilog
, dmu_tx_t
*tx
, znode_t
*zp
,
757 vsecattr_t
*vsecp
, zfs_fuid_info_t
*fuidp
)
765 size_t aclbytes
= vsecp
->vsa_aclentsz
;
767 if (zil_replaying(zilog
, tx
) || zp
->z_unlinked
)
770 txtype
= (ZTOZSB(zp
)->z_version
< ZPL_VERSION_FUID
) ?
773 if (txtype
== TX_ACL
)
774 lrsize
= sizeof (*lr
);
776 lrsize
= sizeof (*lrv0
);
779 ((txtype
== TX_ACL
) ? ZIL_ACE_LENGTH(aclbytes
) : aclbytes
) +
780 (fuidp
? fuidp
->z_domain_str_sz
: 0) +
781 sizeof (uint64_t) * (fuidp
? fuidp
->z_fuid_cnt
: 0);
783 itx
= zil_itx_create(txtype
, txsize
);
785 lr
= (lr_acl_t
*)&itx
->itx_lr
;
786 lr
->lr_foid
= zp
->z_id
;
787 if (txtype
== TX_ACL
) {
788 lr
->lr_acl_bytes
= aclbytes
;
789 lr
->lr_domcnt
= fuidp
? fuidp
->z_domain_cnt
: 0;
790 lr
->lr_fuidcnt
= fuidp
? fuidp
->z_fuid_cnt
: 0;
791 if (vsecp
->vsa_mask
& VSA_ACE_ACLFLAGS
)
792 lr
->lr_acl_flags
= (uint64_t)vsecp
->vsa_aclflags
;
794 lr
->lr_acl_flags
= 0;
796 lr
->lr_aclcnt
= (uint64_t)vsecp
->vsa_aclcnt
;
798 if (txtype
== TX_ACL_V0
) {
799 lrv0
= (lr_acl_v0_t
*)lr
;
800 memcpy(lrv0
+ 1, vsecp
->vsa_aclentp
, aclbytes
);
802 void *start
= (ace_t
*)(lr
+ 1);
804 memcpy(start
, vsecp
->vsa_aclentp
, aclbytes
);
806 start
= (caddr_t
)start
+ ZIL_ACE_LENGTH(aclbytes
);
809 start
= zfs_log_fuid_ids(fuidp
, start
);
810 (void) zfs_log_fuid_domains(fuidp
, start
);
814 itx
->itx_sync
= (zp
->z_sync_cnt
!= 0);
815 zil_itx_assign(zilog
, itx
, tx
);
818 ZFS_MODULE_PARAM(zfs
, zfs_
, immediate_write_sz
, LONG
, ZMOD_RW
,
819 "Largest data block to write to zil");