*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015 by Delphix. All rights reserved.
*/
int
zfs_log_create_txtype(zil_create_t type, vsecattr_t *vsecp, vattr_t *vap)
{
-#ifdef HAVE_XVATTR
- int isxvattr = (vap->va_mask & AT_XVATTR);
-#else
- int isxvattr = 0;
-#endif /* HAVE_XVATTR */
+ int isxvattr = (vap->va_mask & ATTR_XVATTR);
switch (type) {
case Z_FILE:
if (vsecp == NULL && !isxvattr)
return (TX_MAX_TYPE);
}
-#ifdef HAVE_XVATTR
/*
* build up the log data necessary for logging xvattr_t
* First lr_attr_t is initialized. following the lr_attr_t
XAT0_AV_MODIFIED;
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
ZFS_TIME_ENCODE(&xoap->xoa_createtime, crtime);
- if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
+ if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
+ ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));
+
bcopy(xoap->xoa_av_scanstamp, scanstamp, AV_SCANSTAMP_SZ);
+ } else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
+ /*
+ * XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
+ * at the same time, so we can share the same space.
+ */
+ bcopy(&xoap->xoa_projid, scanstamp, sizeof (uint64_t));
+ }
if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
*attrs |= (xoap->xoa_reparse == 0) ? 0 :
XAT0_REPARSE;
if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
*attrs |= (xoap->xoa_sparse == 0) ? 0 :
XAT0_SPARSE;
+ if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT))
+ *attrs |= (xoap->xoa_projinherit == 0) ? 0 :
+ XAT0_PROJINHERIT;
}
static void *
}
return (start);
}
-#endif /* HAVE_XVATTR */
/*
- * zfs_log_create() is used to handle TX_CREATE, TX_CREATE_ATTR, TX_MKDIR,
- * TX_MKDIR_ATTR and TX_MKXATTR
- * transactions.
+ * If zp is an xattr node, check whether the xattr owner is unlinked.
+ * We don't want to log anything if the owner is unlinked.
+ */
+/*
+ * Return nonzero if zp is (or hangs beneath) an xattr node whose
+ * ultimate non-xattr owner is unlinked, in which case the caller
+ * should not log the operation.  Takes and drops inode references
+ * (igrab/iput) while walking; zp itself is released before return.
+ */
+static int
+zfs_xattr_owner_unlinked(znode_t *zp)
+{
+ int unlinked = 0;
+ znode_t *dzp;
+ igrab(ZTOI(zp));
+ /*
+ * if zp is XATTR node, keep walking up via z_xattr_parent until we
+ * get the owner
+ */
+ while (zp->z_pflags & ZFS_XATTR) {
+ ASSERT3U(zp->z_xattr_parent, !=, 0);
+ if (zfs_zget(ZTOZSB(zp), zp->z_xattr_parent, &dzp) != 0) {
+ /* parent lookup failed; conservatively treat owner as unlinked */
+ unlinked = 1;
+ break;
+ }
+ /* drop the hold on the child and continue the walk from the parent */
+ iput(ZTOI(zp));
+ zp = dzp;
+ unlinked = zp->z_unlinked;
+ }
+ /* release the final node held (either the original zp or last parent) */
+ iput(ZTOI(zp));
+ return (unlinked);
+}
+
+/*
+ * Handles TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, TX_MKDIR_ATTR and
+ * TX_MKXATTR transactions.
*
* TX_CREATE and TX_MKDIR are standard creates, but they may have FUID
* domain information appended prior to the name. In this case the
{
itx_t *itx;
lr_create_t *lr;
-#ifdef HAVE_XVATTR
lr_acl_create_t *lracl;
- xvattr_t *xvap = (xvattr_t *)vap;
-#endif /* HAVE_XVATTR */
- size_t aclsize;
+ size_t aclsize = 0;
size_t xvatsize = 0;
size_t txsize;
+ xvattr_t *xvap = (xvattr_t *)vap;
void *end;
size_t lrsize;
size_t namesize = strlen(name) + 1;
size_t fuidsz = 0;
- if (zil_replaying(zilog, tx))
+ if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
return;
/*
fuidsz += fuidp->z_fuid_cnt * sizeof (uint64_t);
}
-#ifdef HAVE_XVATTR
- if (vap->va_mask & AT_XVATTR)
+ if (vap->va_mask & ATTR_XVATTR)
xvatsize = ZIL_XVAT_SIZE(xvap->xva_mapsize);
-#endif /* HAVE_XVATTR */
if ((int)txtype == TX_CREATE_ATTR || (int)txtype == TX_MKDIR_ATTR ||
(int)txtype == TX_CREATE || (int)txtype == TX_MKDIR ||
txsize = sizeof (*lr) + namesize + fuidsz + xvatsize;
lrsize = sizeof (*lr);
} else {
- aclsize = (vsecp) ? vsecp->vsa_aclentsz : 0;
txsize =
sizeof (lr_acl_create_t) + namesize + fuidsz +
ZIL_ACE_LENGTH(aclsize) + xvatsize;
lr = (lr_create_t *)&itx->itx_lr;
lr->lr_doid = dzp->z_id;
lr->lr_foid = zp->z_id;
+ /* Store dnode slot count in 8 bits above object id. */
+ LR_FOID_SET_SLOTS(lr->lr_foid, zp->z_dnodesize >> DNODE_SHIFT);
lr->lr_mode = zp->z_mode;
- if (!IS_EPHEMERAL(zp->z_uid)) {
- lr->lr_uid = (uint64_t)zp->z_uid;
+ if (!IS_EPHEMERAL(KUID_TO_SUID(ZTOI(zp)->i_uid))) {
+ lr->lr_uid = (uint64_t)KUID_TO_SUID(ZTOI(zp)->i_uid);
} else {
lr->lr_uid = fuidp->z_fuid_owner;
}
- if (!IS_EPHEMERAL(zp->z_gid)) {
- lr->lr_gid = (uint64_t)zp->z_gid;
+ if (!IS_EPHEMERAL(KGID_TO_SGID(ZTOI(zp)->i_gid))) {
+ lr->lr_gid = (uint64_t)KGID_TO_SGID(ZTOI(zp)->i_gid);
} else {
lr->lr_gid = fuidp->z_fuid_group;
}
/*
* Fill in xvattr info if any
*/
-#ifdef HAVE_XVATTR
- if (vap->va_mask & AT_XVATTR) {
+ if (vap->va_mask & ATTR_XVATTR) {
zfs_log_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), xvap);
end = (caddr_t)lr + lrsize + xvatsize;
} else {
end = zfs_log_fuid_ids(fuidp, end);
end = zfs_log_fuid_domains(fuidp, end);
}
-#else
- end = (caddr_t)lr + lrsize;
-#endif /* HAVE_XVATTR */
/*
* Now place file name in log record
*/
}
/*
- * zfs_log_remove() handles both TX_REMOVE and TX_RMDIR transactions.
+ * Handles both TX_REMOVE and TX_RMDIR transactions.
*/
void
zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
- znode_t *dzp, char *name, uint64_t foid)
+ znode_t *dzp, char *name, uint64_t foid)
{
itx_t *itx;
lr_remove_t *lr;
size_t namesize = strlen(name) + 1;
- if (zil_replaying(zilog, tx))
+ if (zil_replaying(zilog, tx) || zfs_xattr_owner_unlinked(dzp))
return;
itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
}
/*
- * zfs_log_link() handles TX_LINK transactions.
+ * Handles TX_LINK transactions.
*/
void
zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
- znode_t *dzp, znode_t *zp, char *name)
+ znode_t *dzp, znode_t *zp, char *name)
{
itx_t *itx;
lr_link_t *lr;
}
/*
- * zfs_log_symlink() handles TX_SYMLINK transactions.
+ * Handles TX_SYMLINK transactions.
*/
void
zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
lr = (lr_create_t *)&itx->itx_lr;
lr->lr_doid = dzp->z_id;
lr->lr_foid = zp->z_id;
- lr->lr_uid = zp->z_uid;
- lr->lr_gid = zp->z_gid;
+ lr->lr_uid = KUID_TO_SUID(ZTOI(zp)->i_uid);
+ lr->lr_gid = KGID_TO_SGID(ZTOI(zp)->i_gid);
lr->lr_mode = zp->z_mode;
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(ZTOZSB(zp)), &lr->lr_gen,
sizeof (uint64_t));
}
/*
- * zfs_log_rename() handles TX_RENAME transactions.
+ * Handles TX_RENAME transactions.
*/
void
zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
- znode_t *sdzp, char *sname, znode_t *tdzp, char *dname, znode_t *szp)
+ znode_t *sdzp, char *sname, znode_t *tdzp, char *dname, znode_t *szp)
{
itx_t *itx;
lr_rename_t *lr;
}
/*
- * zfs_log_write() handles TX_WRITE transactions.
+ * zfs_log_write() handles TX_WRITE transactions. The specified callback is
+ * called as soon as the write is on stable storage (be it via a DMU sync or a
+ * ZIL commit).
*/
-ssize_t zfs_immediate_write_sz = 32768;
+long zfs_immediate_write_sz = 32768;
void
zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
- znode_t *zp, offset_t off, ssize_t resid, int ioflag)
+ znode_t *zp, offset_t off, ssize_t resid, int ioflag,
+ zil_callback_t callback, void *callback_data)
{
+ uint32_t blocksize = zp->z_blksz;
itx_wr_state_t write_state;
- boolean_t slogging;
uintptr_t fsync_cnt;
- ssize_t immediate_write_sz;
- if (zil_replaying(zilog, tx) || zp->z_unlinked)
+ if (zil_replaying(zilog, tx) || zp->z_unlinked ||
+ zfs_xattr_owner_unlinked(zp)) {
+ if (callback != NULL)
+ callback(callback_data);
return;
+ }
- immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
- ? 0 : zfs_immediate_write_sz;
-
- slogging = spa_has_slogs(zilog->zl_spa) &&
- (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
- if (resid > immediate_write_sz && !slogging && resid <= zp->z_blksz)
+ if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
+ write_state = WR_INDIRECT;
+ else if (!spa_has_slogs(zilog->zl_spa) &&
+ resid >= zfs_immediate_write_sz)
write_state = WR_INDIRECT;
else if (ioflag & (FSYNC | FDSYNC))
write_state = WR_COPIED;
while (resid) {
itx_t *itx;
lr_write_t *lr;
- ssize_t len;
+ itx_wr_state_t wr_state = write_state;
+ ssize_t len = resid;
- /*
- * If the write would overflow the largest block then split it.
- */
- if (write_state != WR_INDIRECT && resid > ZIL_MAX_LOG_DATA)
- len = SPA_MAXBLOCKSIZE >> 1;
- else
- len = resid;
+ if (wr_state == WR_COPIED && resid > ZIL_MAX_COPIED_DATA)
+ wr_state = WR_NEED_COPY;
+ else if (wr_state == WR_INDIRECT)
+ len = MIN(blocksize - P2PHASE(off, blocksize), resid);
itx = zil_itx_create(txtype, sizeof (*lr) +
- (write_state == WR_COPIED ? len : 0));
+ (wr_state == WR_COPIED ? len : 0));
lr = (lr_write_t *)&itx->itx_lr;
- if (write_state == WR_COPIED && dmu_read(ZTOZSB(zp)->z_os,
+ if (wr_state == WR_COPIED && dmu_read(ZTOZSB(zp)->z_os,
zp->z_id, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
zil_itx_destroy(itx);
itx = zil_itx_create(txtype, sizeof (*lr));
lr = (lr_write_t *)&itx->itx_lr;
- write_state = WR_NEED_COPY;
+ wr_state = WR_NEED_COPY;
}
- itx->itx_wr_state = write_state;
- if (write_state == WR_NEED_COPY)
- itx->itx_sod += len;
+ itx->itx_wr_state = wr_state;
lr->lr_foid = zp->z_id;
lr->lr_offset = off;
lr->lr_length = len;
(fsync_cnt == 0))
itx->itx_sync = B_FALSE;
+ itx->itx_callback = callback;
+ itx->itx_callback_data = callback_data;
zil_itx_assign(zilog, itx, tx);
off += len;
}
/*
- * zfs_log_truncate() handles TX_TRUNCATE transactions.
+ * Handles TX_TRUNCATE transactions.
*/
void
zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
- znode_t *zp, uint64_t off, uint64_t len)
+ znode_t *zp, uint64_t off, uint64_t len)
{
itx_t *itx;
lr_truncate_t *lr;
- if (zil_replaying(zilog, tx) || zp->z_unlinked)
+ if (zil_replaying(zilog, tx) || zp->z_unlinked ||
+ zfs_xattr_owner_unlinked(zp))
return;
itx = zil_itx_create(txtype, sizeof (*lr));
}
/*
- * zfs_log_setattr() handles TX_SETATTR transactions.
+ * Handles TX_SETATTR transactions.
*/
void
-zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype, znode_t *zp,
- struct iattr *attr, uint_t mask_applied, zfs_fuid_info_t *fuidp)
+zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
+ znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp)
{
itx_t *itx;
lr_setattr_t *lr;
-#ifdef HAVE_XVATTR
xvattr_t *xvap = (xvattr_t *)vap;
-#endif /* HAVEXVATTR */
size_t recsize = sizeof (lr_setattr_t);
void *start;
* for lr_attr_t + xvattr mask, mapsize and create time
* plus actual attribute values
*/
-#ifdef HAVE_XVATTR
- if (attr->ia_valid & AT_XVATTR)
+ if (vap->va_mask & ATTR_XVATTR)
recsize = sizeof (*lr) + ZIL_XVAT_SIZE(xvap->xva_mapsize);
if (fuidp)
recsize += fuidp->z_domain_str_sz;
-#endif /* HAVE_XVATTR */
itx = zil_itx_create(txtype, recsize);
lr = (lr_setattr_t *)&itx->itx_lr;
lr->lr_foid = zp->z_id;
lr->lr_mask = (uint64_t)mask_applied;
- lr->lr_mode = (uint64_t)attr->ia_mode;
- if ((mask_applied & ATTR_UID) && IS_EPHEMERAL(attr->ia_uid))
+ lr->lr_mode = (uint64_t)vap->va_mode;
+ if ((mask_applied & ATTR_UID) && IS_EPHEMERAL(vap->va_uid))
lr->lr_uid = fuidp->z_fuid_owner;
else
- lr->lr_uid = (uint64_t)attr->ia_uid;
+ lr->lr_uid = (uint64_t)vap->va_uid;
- if ((mask_applied & ATTR_GID) && IS_EPHEMERAL(attr->ia_gid))
+ if ((mask_applied & ATTR_GID) && IS_EPHEMERAL(vap->va_gid))
lr->lr_gid = fuidp->z_fuid_group;
else
- lr->lr_gid = (uint64_t)attr->ia_gid;
+ lr->lr_gid = (uint64_t)vap->va_gid;
- lr->lr_size = (uint64_t)attr->ia_size;
- ZFS_TIME_ENCODE(&attr->ia_atime, lr->lr_atime);
- ZFS_TIME_ENCODE(&attr->ia_mtime, lr->lr_mtime);
+ lr->lr_size = (uint64_t)vap->va_size;
+ ZFS_TIME_ENCODE(&vap->va_atime, lr->lr_atime);
+ ZFS_TIME_ENCODE(&vap->va_mtime, lr->lr_mtime);
start = (lr_setattr_t *)(lr + 1);
-#ifdef HAVE_XVATTR
- if (attr->ia_valid & ATTR_XVATTR) {
+ if (vap->va_mask & ATTR_XVATTR) {
zfs_log_xvattr((lr_attr_t *)start, xvap);
start = (caddr_t)start + ZIL_XVAT_SIZE(xvap->xva_mapsize);
}
if (fuidp)
(void) zfs_log_fuid_domains(fuidp, start);
-#endif /* HAVE_XVATTR */
itx->itx_sync = (zp->z_sync_cnt != 0);
zil_itx_assign(zilog, itx, tx);
}
/*
- * zfs_log_acl() handles TX_ACL transactions.
+ * Handles TX_ACL transactions.
*/
void
zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
start = (caddr_t)start + ZIL_ACE_LENGTH(aclbytes);
-#ifdef HAVE_XVATTR
if (fuidp) {
start = zfs_log_fuid_ids(fuidp, start);
(void) zfs_log_fuid_domains(fuidp, start);
}
-#endif /* HAVE_XVATTR */
}
itx->itx_sync = (zp->z_sync_cnt != 0);
zil_itx_assign(zilog, itx, tx);
}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zfs_immediate_write_sz, long, 0644);
+MODULE_PARM_DESC(zfs_immediate_write_sz, "Largest data block to write to zil");
+#endif