4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2015, 2018 by Delphix. All rights reserved.
27 #include <sys/types.h>
28 #include <sys/param.h>
29 #include <sys/sysmacros.h>
30 #include <sys/cmn_err.h>
32 #include <sys/thread.h>
35 #include <sys/zfs_znode.h>
36 #include <sys/zfs_dir.h>
38 #include <sys/zil_impl.h>
39 #include <sys/byteorder.h>
40 #include <sys/policy.h>
46 #include <sys/zfs_fuid.h>
47 #include <sys/dsl_dataset.h>
/*
 * These zfs_log_* functions must be called within a dmu tx, in one
 * of 2 contexts depending on zilog->z_replay:
 *
 * Non replay mode
 * ---------------
 * We need to record the transaction so that if it is committed to
 * the Intent Log then it can be replayed.  An intent log transaction
 * structure (itx_t) is allocated and all the information necessary to
 * possibly replay the transaction is saved in it. The itx is then assigned
 * a sequence number and inserted in the in-memory list anchored in the zilog.
 *
 * Replay mode
 * -----------
 * We need to mark the intent log record as replayed in the log header.
 * This is done in the same transaction as the replay so that they
 * commit atomically.
 */
69 zfs_log_create_txtype(zil_create_t type
, vsecattr_t
*vsecp
, vattr_t
*vap
)
71 int isxvattr
= (vap
->va_mask
& ATTR_XVATTR
);
74 if (vsecp
== NULL
&& !isxvattr
)
76 if (vsecp
&& isxvattr
)
77 return (TX_CREATE_ACL_ATTR
);
79 return (TX_CREATE_ACL
);
81 return (TX_CREATE_ATTR
);
83 if (vsecp
== NULL
&& !isxvattr
)
85 if (vsecp
&& isxvattr
)
86 return (TX_MKDIR_ACL_ATTR
);
88 return (TX_MKDIR_ACL
);
90 return (TX_MKDIR_ATTR
);
99 * build up the log data necessary for logging xvattr_t
100 * First lr_attr_t is initialized. following the lr_attr_t
101 * is the mapsize and attribute bitmap copied from the xvattr_t.
102 * Following the bitmap and bitmapsize two 64 bit words are reserved
103 * for the create time which may be set. Following the create time
104 * records a single 64 bit integer which has the bits to set on
105 * replay for the xvattr.
108 zfs_log_xvattr(lr_attr_t
*lrattr
, xvattr_t
*xvap
)
117 xoap
= xva_getxoptattr(xvap
);
120 lrattr
->lr_attr_masksize
= xvap
->xva_mapsize
;
121 bitmap
= &lrattr
->lr_attr_bitmap
;
122 for (i
= 0; i
!= xvap
->xva_mapsize
; i
++, bitmap
++) {
123 *bitmap
= xvap
->xva_reqattrmap
[i
];
126 /* Now pack the attributes up in a single uint64_t */
127 attrs
= (uint64_t *)bitmap
;
130 memset(crtime
, 0, 2 * sizeof (uint64_t));
131 scanstamp
= (caddr_t
)(crtime
+ 2);
132 memset(scanstamp
, 0, AV_SCANSTAMP_SZ
);
133 if (XVA_ISSET_REQ(xvap
, XAT_READONLY
))
134 *attrs
|= (xoap
->xoa_readonly
== 0) ? 0 :
136 if (XVA_ISSET_REQ(xvap
, XAT_HIDDEN
))
137 *attrs
|= (xoap
->xoa_hidden
== 0) ? 0 :
139 if (XVA_ISSET_REQ(xvap
, XAT_SYSTEM
))
140 *attrs
|= (xoap
->xoa_system
== 0) ? 0 :
142 if (XVA_ISSET_REQ(xvap
, XAT_ARCHIVE
))
143 *attrs
|= (xoap
->xoa_archive
== 0) ? 0 :
145 if (XVA_ISSET_REQ(xvap
, XAT_IMMUTABLE
))
146 *attrs
|= (xoap
->xoa_immutable
== 0) ? 0 :
148 if (XVA_ISSET_REQ(xvap
, XAT_NOUNLINK
))
149 *attrs
|= (xoap
->xoa_nounlink
== 0) ? 0 :
151 if (XVA_ISSET_REQ(xvap
, XAT_APPENDONLY
))
152 *attrs
|= (xoap
->xoa_appendonly
== 0) ? 0 :
154 if (XVA_ISSET_REQ(xvap
, XAT_OPAQUE
))
155 *attrs
|= (xoap
->xoa_opaque
== 0) ? 0 :
157 if (XVA_ISSET_REQ(xvap
, XAT_NODUMP
))
158 *attrs
|= (xoap
->xoa_nodump
== 0) ? 0 :
160 if (XVA_ISSET_REQ(xvap
, XAT_AV_QUARANTINED
))
161 *attrs
|= (xoap
->xoa_av_quarantined
== 0) ? 0 :
163 if (XVA_ISSET_REQ(xvap
, XAT_AV_MODIFIED
))
164 *attrs
|= (xoap
->xoa_av_modified
== 0) ? 0 :
166 if (XVA_ISSET_REQ(xvap
, XAT_CREATETIME
))
167 ZFS_TIME_ENCODE(&xoap
->xoa_createtime
, crtime
);
168 if (XVA_ISSET_REQ(xvap
, XAT_AV_SCANSTAMP
)) {
169 ASSERT(!XVA_ISSET_REQ(xvap
, XAT_PROJID
));
171 memcpy(scanstamp
, xoap
->xoa_av_scanstamp
, AV_SCANSTAMP_SZ
);
172 } else if (XVA_ISSET_REQ(xvap
, XAT_PROJID
)) {
174 * XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
175 * at the same time, so we can share the same space.
177 memcpy(scanstamp
, &xoap
->xoa_projid
, sizeof (uint64_t));
179 if (XVA_ISSET_REQ(xvap
, XAT_REPARSE
))
180 *attrs
|= (xoap
->xoa_reparse
== 0) ? 0 :
182 if (XVA_ISSET_REQ(xvap
, XAT_OFFLINE
))
183 *attrs
|= (xoap
->xoa_offline
== 0) ? 0 :
185 if (XVA_ISSET_REQ(xvap
, XAT_SPARSE
))
186 *attrs
|= (xoap
->xoa_sparse
== 0) ? 0 :
188 if (XVA_ISSET_REQ(xvap
, XAT_PROJINHERIT
))
189 *attrs
|= (xoap
->xoa_projinherit
== 0) ? 0 :
194 zfs_log_fuid_ids(zfs_fuid_info_t
*fuidp
, void *start
)
197 uint64_t *fuidloc
= start
;
199 /* First copy in the ACE FUIDs */
200 for (zfuid
= list_head(&fuidp
->z_fuids
); zfuid
;
201 zfuid
= list_next(&fuidp
->z_fuids
, zfuid
)) {
202 *fuidloc
++ = zfuid
->z_logfuid
;
209 zfs_log_fuid_domains(zfs_fuid_info_t
*fuidp
, void *start
)
211 zfs_fuid_domain_t
*zdomain
;
213 /* now copy in the domain info, if any */
214 if (fuidp
->z_domain_str_sz
!= 0) {
215 for (zdomain
= list_head(&fuidp
->z_domains
); zdomain
;
216 zdomain
= list_next(&fuidp
->z_domains
, zdomain
)) {
217 memcpy(start
, zdomain
->z_domain
,
218 strlen(zdomain
->z_domain
) + 1);
219 start
= (caddr_t
)start
+
220 strlen(zdomain
->z_domain
) + 1;
227 * If zp is an xattr node, check whether the xattr owner is unlinked.
228 * We don't want to log anything if the owner is unlinked.
231 zfs_xattr_owner_unlinked(znode_t
*zp
)
239 * zrele drops the vnode lock which violates the VOP locking contract
240 * on FreeBSD. See comment at the top of zfs_replay.c for more detail.
243 * if zp is XATTR node, keep walking up via z_xattr_parent until we
246 while (tzp
->z_pflags
& ZFS_XATTR
) {
247 ASSERT3U(zp
->z_xattr_parent
, !=, 0);
248 if (zfs_zget(ZTOZSB(tzp
), tzp
->z_xattr_parent
, &dzp
) != 0) {
256 unlinked
= tzp
->z_unlinked
;
263 * if zp is XATTR node, keep walking up via z_xattr_parent until we
266 while (zp
->z_pflags
& ZFS_XATTR
) {
267 ASSERT3U(zp
->z_xattr_parent
, !=, 0);
268 if (zfs_zget(ZTOZSB(zp
), zp
->z_xattr_parent
, &dzp
) != 0) {
275 unlinked
= zp
->z_unlinked
;
283 * Handles TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, TX_MKDIR_ATTR and
284 * TK_MKXATTR transactions.
286 * TX_CREATE and TX_MKDIR are standard creates, but they may have FUID
287 * domain information appended prior to the name. In this case the
288 * uid/gid in the log record will be a log centric FUID.
290 * TX_CREATE_ACL_ATTR and TX_MKDIR_ACL_ATTR handle special creates that
291 * may contain attributes, ACL and optional fuid information.
293 * TX_CREATE_ACL and TX_MKDIR_ACL handle special creates that specify
294 * and ACL and normal users/groups in the ACEs.
296 * There may be an optional xvattr attribute information similar
297 * to zfs_log_setattr.
299 * Also, after the file name "domain" strings may be appended.
302 zfs_log_create(zilog_t
*zilog
, dmu_tx_t
*tx
, uint64_t txtype
,
303 znode_t
*dzp
, znode_t
*zp
, const char *name
, vsecattr_t
*vsecp
,
304 zfs_fuid_info_t
*fuidp
, vattr_t
*vap
)
308 lr_acl_create_t
*lracl
;
312 xvattr_t
*xvap
= (xvattr_t
*)vap
;
315 size_t namesize
= strlen(name
) + 1;
318 if (zil_replaying(zilog
, tx
) || zfs_xattr_owner_unlinked(dzp
))
322 * If we have FUIDs present then add in space for
323 * domains and ACE fuid's if any.
326 fuidsz
+= fuidp
->z_domain_str_sz
;
327 fuidsz
+= fuidp
->z_fuid_cnt
* sizeof (uint64_t);
330 if (vap
->va_mask
& ATTR_XVATTR
)
331 xvatsize
= ZIL_XVAT_SIZE(xvap
->xva_mapsize
);
333 if ((int)txtype
== TX_CREATE_ATTR
|| (int)txtype
== TX_MKDIR_ATTR
||
334 (int)txtype
== TX_CREATE
|| (int)txtype
== TX_MKDIR
||
335 (int)txtype
== TX_MKXATTR
) {
336 txsize
= sizeof (*lr
) + namesize
+ fuidsz
+ xvatsize
;
337 lrsize
= sizeof (*lr
);
340 sizeof (lr_acl_create_t
) + namesize
+ fuidsz
+
341 ZIL_ACE_LENGTH(aclsize
) + xvatsize
;
342 lrsize
= sizeof (lr_acl_create_t
);
345 itx
= zil_itx_create(txtype
, txsize
);
347 lr
= (lr_create_t
*)&itx
->itx_lr
;
348 lr
->lr_doid
= dzp
->z_id
;
349 lr
->lr_foid
= zp
->z_id
;
350 /* Store dnode slot count in 8 bits above object id. */
351 LR_FOID_SET_SLOTS(lr
->lr_foid
, zp
->z_dnodesize
>> DNODE_SHIFT
);
352 lr
->lr_mode
= zp
->z_mode
;
353 if (!IS_EPHEMERAL(KUID_TO_SUID(ZTOUID(zp
)))) {
354 lr
->lr_uid
= (uint64_t)KUID_TO_SUID(ZTOUID(zp
));
356 lr
->lr_uid
= fuidp
->z_fuid_owner
;
358 if (!IS_EPHEMERAL(KGID_TO_SGID(ZTOGID(zp
)))) {
359 lr
->lr_gid
= (uint64_t)KGID_TO_SGID(ZTOGID(zp
));
361 lr
->lr_gid
= fuidp
->z_fuid_group
;
363 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_GEN(ZTOZSB(zp
)), &lr
->lr_gen
,
365 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_CRTIME(ZTOZSB(zp
)),
366 lr
->lr_crtime
, sizeof (uint64_t) * 2);
368 if (sa_lookup(zp
->z_sa_hdl
, SA_ZPL_RDEV(ZTOZSB(zp
)), &lr
->lr_rdev
,
369 sizeof (lr
->lr_rdev
)) != 0)
373 * Fill in xvattr info if any
375 if (vap
->va_mask
& ATTR_XVATTR
) {
376 zfs_log_xvattr((lr_attr_t
*)((caddr_t
)lr
+ lrsize
), xvap
);
377 end
= (caddr_t
)lr
+ lrsize
+ xvatsize
;
379 end
= (caddr_t
)lr
+ lrsize
;
382 /* Now fill in any ACL info */
385 lracl
= (lr_acl_create_t
*)&itx
->itx_lr
;
386 lracl
->lr_aclcnt
= vsecp
->vsa_aclcnt
;
387 lracl
->lr_acl_bytes
= aclsize
;
388 lracl
->lr_domcnt
= fuidp
? fuidp
->z_domain_cnt
: 0;
389 lracl
->lr_fuidcnt
= fuidp
? fuidp
->z_fuid_cnt
: 0;
390 if (vsecp
->vsa_aclflags
& VSA_ACE_ACLFLAGS
)
391 lracl
->lr_acl_flags
= (uint64_t)vsecp
->vsa_aclflags
;
393 lracl
->lr_acl_flags
= 0;
395 memcpy(end
, vsecp
->vsa_aclentp
, aclsize
);
396 end
= (caddr_t
)end
+ ZIL_ACE_LENGTH(aclsize
);
399 /* drop in FUID info */
401 end
= zfs_log_fuid_ids(fuidp
, end
);
402 end
= zfs_log_fuid_domains(fuidp
, end
);
405 * Now place file name in log record
407 memcpy(end
, name
, namesize
);
409 zil_itx_assign(zilog
, itx
, tx
);
413 * Handles both TX_REMOVE and TX_RMDIR transactions.
416 zfs_log_remove(zilog_t
*zilog
, dmu_tx_t
*tx
, uint64_t txtype
,
417 znode_t
*dzp
, const char *name
, uint64_t foid
, boolean_t unlinked
)
421 size_t namesize
= strlen(name
) + 1;
423 if (zil_replaying(zilog
, tx
) || zfs_xattr_owner_unlinked(dzp
))
426 itx
= zil_itx_create(txtype
, sizeof (*lr
) + namesize
);
427 lr
= (lr_remove_t
*)&itx
->itx_lr
;
428 lr
->lr_doid
= dzp
->z_id
;
429 memcpy(lr
+ 1, name
, namesize
);
434 * Object ids can be re-instantiated in the next txg so
435 * remove any async transactions to avoid future leaks.
436 * This can happen if a fsync occurs on the re-instantiated
437 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
438 * the new file data and flushes a write record for the old object.
441 ASSERT((txtype
& ~TX_CI
) == TX_REMOVE
);
442 zil_remove_async(zilog
, foid
);
444 zil_itx_assign(zilog
, itx
, tx
);
448 * Handles TX_LINK transactions.
451 zfs_log_link(zilog_t
*zilog
, dmu_tx_t
*tx
, uint64_t txtype
,
452 znode_t
*dzp
, znode_t
*zp
, const char *name
)
456 size_t namesize
= strlen(name
) + 1;
458 if (zil_replaying(zilog
, tx
))
461 itx
= zil_itx_create(txtype
, sizeof (*lr
) + namesize
);
462 lr
= (lr_link_t
*)&itx
->itx_lr
;
463 lr
->lr_doid
= dzp
->z_id
;
464 lr
->lr_link_obj
= zp
->z_id
;
465 memcpy(lr
+ 1, name
, namesize
);
467 zil_itx_assign(zilog
, itx
, tx
);
471 * Handles TX_SYMLINK transactions.
474 zfs_log_symlink(zilog_t
*zilog
, dmu_tx_t
*tx
, uint64_t txtype
,
475 znode_t
*dzp
, znode_t
*zp
, const char *name
, const char *link
)
479 size_t namesize
= strlen(name
) + 1;
480 size_t linksize
= strlen(link
) + 1;
482 if (zil_replaying(zilog
, tx
))
485 itx
= zil_itx_create(txtype
, sizeof (*lr
) + namesize
+ linksize
);
486 lr
= (lr_create_t
*)&itx
->itx_lr
;
487 lr
->lr_doid
= dzp
->z_id
;
488 lr
->lr_foid
= zp
->z_id
;
489 lr
->lr_uid
= KUID_TO_SUID(ZTOUID(zp
));
490 lr
->lr_gid
= KGID_TO_SGID(ZTOGID(zp
));
491 lr
->lr_mode
= zp
->z_mode
;
492 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_GEN(ZTOZSB(zp
)), &lr
->lr_gen
,
494 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_CRTIME(ZTOZSB(zp
)),
495 lr
->lr_crtime
, sizeof (uint64_t) * 2);
496 memcpy((char *)(lr
+ 1), name
, namesize
);
497 memcpy((char *)(lr
+ 1) + namesize
, link
, linksize
);
499 zil_itx_assign(zilog
, itx
, tx
);
503 * Handles TX_RENAME transactions.
506 zfs_log_rename(zilog_t
*zilog
, dmu_tx_t
*tx
, uint64_t txtype
, znode_t
*sdzp
,
507 const char *sname
, znode_t
*tdzp
, const char *dname
, znode_t
*szp
)
511 size_t snamesize
= strlen(sname
) + 1;
512 size_t dnamesize
= strlen(dname
) + 1;
514 if (zil_replaying(zilog
, tx
))
517 itx
= zil_itx_create(txtype
, sizeof (*lr
) + snamesize
+ dnamesize
);
518 lr
= (lr_rename_t
*)&itx
->itx_lr
;
519 lr
->lr_sdoid
= sdzp
->z_id
;
520 lr
->lr_tdoid
= tdzp
->z_id
;
521 memcpy((char *)(lr
+ 1), sname
, snamesize
);
522 memcpy((char *)(lr
+ 1) + snamesize
, dname
, dnamesize
);
523 itx
->itx_oid
= szp
->z_id
;
525 zil_itx_assign(zilog
, itx
, tx
);
/*
 * zfs_log_write() handles TX_WRITE transactions. The specified callback is
 * called as soon as the write is on stable storage (be it via a DMU sync or a
 * ZIL commit).
 */
static long zfs_immediate_write_sz = 32768;
536 zfs_log_write(zilog_t
*zilog
, dmu_tx_t
*tx
, int txtype
,
537 znode_t
*zp
, offset_t off
, ssize_t resid
, int ioflag
,
538 zil_callback_t callback
, void *callback_data
)
540 dmu_buf_impl_t
*db
= (dmu_buf_impl_t
*)sa_get_db(zp
->z_sa_hdl
);
541 uint32_t blocksize
= zp
->z_blksz
;
542 itx_wr_state_t write_state
;
545 ssize_t size
= resid
;
547 if (zil_replaying(zilog
, tx
) || zp
->z_unlinked
||
548 zfs_xattr_owner_unlinked(zp
)) {
549 if (callback
!= NULL
)
550 callback(callback_data
);
554 if (zilog
->zl_logbias
== ZFS_LOGBIAS_THROUGHPUT
)
555 write_state
= WR_INDIRECT
;
556 else if (!spa_has_slogs(zilog
->zl_spa
) &&
557 resid
>= zfs_immediate_write_sz
)
558 write_state
= WR_INDIRECT
;
559 else if (ioflag
& (O_SYNC
| O_DSYNC
))
560 write_state
= WR_COPIED
;
562 write_state
= WR_NEED_COPY
;
564 if ((fsync_cnt
= (uintptr_t)tsd_get(zfs_fsyncer_key
)) != 0) {
565 (void) tsd_set(zfs_fsyncer_key
, (void *)(fsync_cnt
- 1));
568 (void) sa_lookup(zp
->z_sa_hdl
, SA_ZPL_GEN(ZTOZSB(zp
)), &gen
,
574 itx_wr_state_t wr_state
= write_state
;
578 * A WR_COPIED record must fit entirely in one log block.
579 * Large writes can use WR_NEED_COPY, which the ZIL will
580 * split into multiple records across several log blocks
583 if (wr_state
== WR_COPIED
&&
584 resid
> zil_max_copied_data(zilog
))
585 wr_state
= WR_NEED_COPY
;
586 else if (wr_state
== WR_INDIRECT
)
587 len
= MIN(blocksize
- P2PHASE(off
, blocksize
), resid
);
589 itx
= zil_itx_create(txtype
, sizeof (*lr
) +
590 (wr_state
== WR_COPIED
? len
: 0));
591 lr
= (lr_write_t
*)&itx
->itx_lr
;
594 * For WR_COPIED records, copy the data into the lr_write_t.
596 if (wr_state
== WR_COPIED
) {
599 err
= dmu_read_by_dnode(DB_DNODE(db
), off
, len
, lr
+ 1,
600 DMU_READ_NO_PREFETCH
);
602 zil_itx_destroy(itx
);
603 itx
= zil_itx_create(txtype
, sizeof (*lr
));
604 lr
= (lr_write_t
*)&itx
->itx_lr
;
605 wr_state
= WR_NEED_COPY
;
610 itx
->itx_wr_state
= wr_state
;
611 lr
->lr_foid
= zp
->z_id
;
615 BP_ZERO(&lr
->lr_blkptr
);
617 itx
->itx_private
= ZTOZSB(zp
);
620 if (!(ioflag
& (O_SYNC
| O_DSYNC
)) && (zp
->z_sync_cnt
== 0) &&
622 itx
->itx_sync
= B_FALSE
;
624 itx
->itx_callback
= callback
;
625 itx
->itx_callback_data
= callback_data
;
626 zil_itx_assign(zilog
, itx
, tx
);
632 if (write_state
== WR_COPIED
|| write_state
== WR_NEED_COPY
) {
633 dsl_pool_wrlog_count(zilog
->zl_dmu_pool
, size
, tx
->tx_txg
);
638 * Handles TX_TRUNCATE transactions.
641 zfs_log_truncate(zilog_t
*zilog
, dmu_tx_t
*tx
, int txtype
,
642 znode_t
*zp
, uint64_t off
, uint64_t len
)
647 if (zil_replaying(zilog
, tx
) || zp
->z_unlinked
||
648 zfs_xattr_owner_unlinked(zp
))
651 itx
= zil_itx_create(txtype
, sizeof (*lr
));
652 lr
= (lr_truncate_t
*)&itx
->itx_lr
;
653 lr
->lr_foid
= zp
->z_id
;
657 itx
->itx_sync
= (zp
->z_sync_cnt
!= 0);
658 zil_itx_assign(zilog
, itx
, tx
);
662 * Handles TX_SETATTR transactions.
665 zfs_log_setattr(zilog_t
*zilog
, dmu_tx_t
*tx
, int txtype
,
666 znode_t
*zp
, vattr_t
*vap
, uint_t mask_applied
, zfs_fuid_info_t
*fuidp
)
670 xvattr_t
*xvap
= (xvattr_t
*)vap
;
671 size_t recsize
= sizeof (lr_setattr_t
);
674 if (zil_replaying(zilog
, tx
) || zp
->z_unlinked
)
678 * If XVATTR set, then log record size needs to allow
679 * for lr_attr_t + xvattr mask, mapsize and create time
680 * plus actual attribute values
682 if (vap
->va_mask
& ATTR_XVATTR
)
683 recsize
= sizeof (*lr
) + ZIL_XVAT_SIZE(xvap
->xva_mapsize
);
686 recsize
+= fuidp
->z_domain_str_sz
;
688 itx
= zil_itx_create(txtype
, recsize
);
689 lr
= (lr_setattr_t
*)&itx
->itx_lr
;
690 lr
->lr_foid
= zp
->z_id
;
691 lr
->lr_mask
= (uint64_t)mask_applied
;
692 lr
->lr_mode
= (uint64_t)vap
->va_mode
;
693 if ((mask_applied
& ATTR_UID
) && IS_EPHEMERAL(vap
->va_uid
))
694 lr
->lr_uid
= fuidp
->z_fuid_owner
;
696 lr
->lr_uid
= (uint64_t)vap
->va_uid
;
698 if ((mask_applied
& ATTR_GID
) && IS_EPHEMERAL(vap
->va_gid
))
699 lr
->lr_gid
= fuidp
->z_fuid_group
;
701 lr
->lr_gid
= (uint64_t)vap
->va_gid
;
703 lr
->lr_size
= (uint64_t)vap
->va_size
;
704 ZFS_TIME_ENCODE(&vap
->va_atime
, lr
->lr_atime
);
705 ZFS_TIME_ENCODE(&vap
->va_mtime
, lr
->lr_mtime
);
706 start
= (lr_setattr_t
*)(lr
+ 1);
707 if (vap
->va_mask
& ATTR_XVATTR
) {
708 zfs_log_xvattr((lr_attr_t
*)start
, xvap
);
709 start
= (caddr_t
)start
+ ZIL_XVAT_SIZE(xvap
->xva_mapsize
);
713 * Now stick on domain information if any on end
717 (void) zfs_log_fuid_domains(fuidp
, start
);
719 itx
->itx_sync
= (zp
->z_sync_cnt
!= 0);
720 zil_itx_assign(zilog
, itx
, tx
);
724 * Handles TX_SETSAXATTR transactions.
727 zfs_log_setsaxattr(zilog_t
*zilog
, dmu_tx_t
*tx
, int txtype
,
728 znode_t
*zp
, const char *name
, const void *value
, size_t size
)
732 size_t recsize
= sizeof (lr_setsaxattr_t
);
736 if (zil_replaying(zilog
, tx
) || zp
->z_unlinked
)
739 namelen
= strlen(name
) + 1;
740 recsize
+= (namelen
+ size
);
741 itx
= zil_itx_create(txtype
, recsize
);
742 lr
= (lr_setsaxattr_t
*)&itx
->itx_lr
;
743 lr
->lr_foid
= zp
->z_id
;
744 xattrstart
= (char *)(lr
+ 1);
745 memcpy(xattrstart
, name
, namelen
);
747 memcpy((char *)xattrstart
+ namelen
, value
, size
);
753 itx
->itx_sync
= (zp
->z_sync_cnt
!= 0);
754 zil_itx_assign(zilog
, itx
, tx
);
758 * Handles TX_ACL transactions.
761 zfs_log_acl(zilog_t
*zilog
, dmu_tx_t
*tx
, znode_t
*zp
,
762 vsecattr_t
*vsecp
, zfs_fuid_info_t
*fuidp
)
770 size_t aclbytes
= vsecp
->vsa_aclentsz
;
772 if (zil_replaying(zilog
, tx
) || zp
->z_unlinked
)
775 txtype
= (ZTOZSB(zp
)->z_version
< ZPL_VERSION_FUID
) ?
778 if (txtype
== TX_ACL
)
779 lrsize
= sizeof (*lr
);
781 lrsize
= sizeof (*lrv0
);
784 ((txtype
== TX_ACL
) ? ZIL_ACE_LENGTH(aclbytes
) : aclbytes
) +
785 (fuidp
? fuidp
->z_domain_str_sz
: 0) +
786 sizeof (uint64_t) * (fuidp
? fuidp
->z_fuid_cnt
: 0);
788 itx
= zil_itx_create(txtype
, txsize
);
790 lr
= (lr_acl_t
*)&itx
->itx_lr
;
791 lr
->lr_foid
= zp
->z_id
;
792 if (txtype
== TX_ACL
) {
793 lr
->lr_acl_bytes
= aclbytes
;
794 lr
->lr_domcnt
= fuidp
? fuidp
->z_domain_cnt
: 0;
795 lr
->lr_fuidcnt
= fuidp
? fuidp
->z_fuid_cnt
: 0;
796 if (vsecp
->vsa_mask
& VSA_ACE_ACLFLAGS
)
797 lr
->lr_acl_flags
= (uint64_t)vsecp
->vsa_aclflags
;
799 lr
->lr_acl_flags
= 0;
801 lr
->lr_aclcnt
= (uint64_t)vsecp
->vsa_aclcnt
;
803 if (txtype
== TX_ACL_V0
) {
804 lrv0
= (lr_acl_v0_t
*)lr
;
805 memcpy(lrv0
+ 1, vsecp
->vsa_aclentp
, aclbytes
);
807 void *start
= (ace_t
*)(lr
+ 1);
809 memcpy(start
, vsecp
->vsa_aclentp
, aclbytes
);
811 start
= (caddr_t
)start
+ ZIL_ACE_LENGTH(aclbytes
);
814 start
= zfs_log_fuid_ids(fuidp
, start
);
815 (void) zfs_log_fuid_domains(fuidp
, start
);
819 itx
->itx_sync
= (zp
->z_sync_cnt
!= 0);
820 zil_itx_assign(zilog
, itx
, tx
);
823 ZFS_MODULE_PARAM(zfs
, zfs_
, immediate_write_sz
, LONG
, ZMOD_RW
,
824 "Largest data block to write to zil");