*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
* See zil.h for more information about these fields.
*/
zil_stats_t zil_stats = {
- { "zil_commit_count", KSTAT_DATA_UINT64 },
- { "zil_commit_writer_count", KSTAT_DATA_UINT64 },
- { "zil_itx_count", KSTAT_DATA_UINT64 },
- { "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
- { "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
- { "zil_itx_copied_count", KSTAT_DATA_UINT64 },
- { "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
- { "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
- { "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
- { "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
- { "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
- { "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
- { "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
+ { "zil_commit_count", KSTAT_DATA_UINT64 },
+ { "zil_commit_writer_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_copied_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
+ { "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
};
static kstat_t *zil_ksp;
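
/*
 * Note: on Linux these counters are published through the SPL kstat
 * interface and typically show up under /proc/spl/kstat/zfs/zil once
 * zil_init() has registered them via kstat_create() below.
 */
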
/*
- * This global ZIL switch affects all pools
+ * Disable intent logging replay. This global ZIL switch affects all pools.
*/
-int zil_replay_disable = 0; /* disable intent logging replay */
+int zil_replay_disable = 0;
/*
* Tunable parameter for debugging or performance analysis. Setting
avl_index_t where;
if (avl_find(t, dva, &where) != NULL)
- return (EEXIST);
+ return (SET_ERROR(EEXIST));
zn = kmem_alloc(sizeof (zil_bp_node_t), KM_PUSHPAGE);
zn->zn_dva = *dva;
SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
- error = dsl_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
+ error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
- error = ECKSUM;
+ error = SET_ERROR(ECKSUM);
} else {
bcopy(lr, dst, len);
*end = (char *)dst + len;
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
(zilc->zc_nused > (size - sizeof (*zilc)))) {
- error = ECKSUM;
+ error = SET_ERROR(ECKSUM);
} else {
bcopy(lr, dst, zilc->zc_nused);
*end = (char *)dst + zilc->zc_nused;
}
}
- VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
+ VERIFY(arc_buf_remove_ref(abuf, &abuf));
}
return (error);
SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
- error = arc_read_nolock(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
+ error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
char *lrbuf, *lrp;
int error = 0;
- bzero(&next_blk, sizeof(blkptr_t));
+ bzero(&next_blk, sizeof (blkptr_t));
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
break;
error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
- if (error)
+ if (error != 0)
break;
for (lrp = lrbuf; lrp < end; lrp += reclen) {
return (lwb);
}
+/*
+ * Called when we create in-memory log transactions so that we know
+ * to clean up the itxs at the end of spa_sync().
+ */
+void
+zilog_dirty(zilog_t *zilog, uint64_t txg)
+{
+ dsl_pool_t *dp = zilog->zl_dmu_pool;
+ dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
+
+ if (dsl_dataset_is_snapshot(ds))
+ panic("dirtying snapshot!");
+
+ if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
+ /* up the hold count until we can be written out */
+ dmu_buf_add_ref(ds->ds_dbuf, zilog);
+ }
+}
+
+boolean_t
+zilog_is_dirty(zilog_t *zilog)
+{
+ dsl_pool_t *dp = zilog->zl_dmu_pool;
+ int t;
+
+ for (t = 0; t < TXG_SIZE; t++) {
+ if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
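+
+/*
+ * A rough sketch of how the per-txg dirty list is expected to be drained
+ * from the pool sync path (the authoritative code lives in dsl_pool.c and
+ * may differ in detail):
+ *
+ *	while ((zilog = txg_list_remove(&dp->dp_dirty_zilogs, txg)) != NULL) {
+ *		zil_clean(zilog, txg);
+ *		dmu_buf_rele(dmu_objset_ds(zilog->zl_os)->ds_dbuf, zilog);
+ *	}
+ *
+ * The dmu_buf_rele() drops the hold taken in zilog_dirty() above.
+ */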
+
/*
* Create an on-disk intent log.
*/
kmem_cache_free(zil_lwb_cache, lwb);
}
} else if (!keep_first) {
- (void) zil_parse(zilog, zil_free_log_block,
- zil_free_log_record, tx, zh->zh_claim_txg);
+ zil_destroy_sync(zilog, tx);
}
mutex_exit(&zilog->zl_lock);
dmu_tx_commit(tx);
}
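+
+/*
+ * Free the on-disk intent log blocks for this dataset. This is the tail of
+ * zil_destroy() factored out so that other callers (presumably the dataset
+ * destroy path) can free the log blocks directly once the lwb list is empty.
+ */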
+void
+zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
+{
+ ASSERT(list_is_empty(&zilog->zl_lwb_list));
+ (void) zil_parse(zilog, zil_free_log_block,
+ zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
+}
+
int
zil_claim(const char *osname, void *txarg)
{
objset_t *os;
int error;
- error = dmu_objset_hold(osname, FTAG, &os);
- if (error) {
+ error = dmu_objset_own(osname, DMU_OST_ANY, B_FALSE, FTAG, &os);
+ if (error != 0) {
cmn_err(CE_WARN, "can't open objset for %s", osname);
return (0);
}
zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
BP_ZERO(&zh->zh_log);
dsl_dataset_dirty(dmu_objset_ds(os), tx);
- dmu_objset_rele(os, FTAG);
+ dmu_objset_disown(os, FTAG);
return (0);
}
}
ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
- dmu_objset_rele(os, FTAG);
+ dmu_objset_disown(os, FTAG);
return (0);
}
ASSERT(tx == NULL);
error = dmu_objset_hold(osname, FTAG, &os);
- if (error) {
+ if (error != 0) {
cmn_err(CE_WARN, "can't open objset for %s", osname);
return (0);
}
}
lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
0, &lwb->lwb_blk, lwb->lwb_buf, BP_GET_LSIZE(&lwb->lwb_blk),
- zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
+ zil_lwb_write_done, lwb, ZIO_PRIORITY_SYNC_WRITE,
ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_FASTWRITE, &zb);
}
/*
* Define a limited set of intent log block sizes.
+ *
* These must be a multiple of 4KB. Note only the amount used (again
* aligned to 4KB) actually gets written. However, we can't always just
* allocate SPA_MAXBLOCKSIZE as the slog space could be exhausted.
BP_ZERO(bp);
use_slog = USE_SLOG(zilog);
- error = zio_alloc_zil(spa, txg, bp, zil_blksz, USE_SLOG(zilog));
- if (use_slog)
- {
+ error = zio_alloc_zil(spa, txg, bp, zil_blksz,
+ USE_SLOG(zilog));
+ if (use_slog) {
ZIL_STAT_BUMP(zil_itx_metaslab_slog_count);
ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused);
- }
- else
- {
+ } else {
ZIL_STAT_BUMP(zil_itx_metaslab_normal_count);
ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused);
}
- if (!error) {
+ if (error == 0) {
ASSERT3U(bp->blk_birth, ==, txg);
bp->blk_cksum = lwb->lwb_blk.blk_cksum;
bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
return (NULL);
ASSERT(lwb->lwb_buf != NULL);
+ ASSERT(zilog_is_dirty(zilog) ||
+ spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
dlen = P2ROUNDUP_TYPED(
dbuf = lr_buf + reclen;
lrw->lr_common.lrc_reclen += dlen;
ZIL_STAT_BUMP(zil_itx_needcopy_count);
- ZIL_STAT_INCR(zil_itx_needcopy_bytes, lrw->lr_length);
+ ZIL_STAT_INCR(zil_itx_needcopy_bytes,
+ lrw->lr_length);
} else {
ASSERT(itx->itx_wr_state == WR_INDIRECT);
dbuf = NULL;
ZIL_STAT_BUMP(zil_itx_indirect_count);
- ZIL_STAT_INCR(zil_itx_indirect_bytes, lrw->lr_length);
+ ZIL_STAT_INCR(zil_itx_indirect_bytes,
+ lrw->lr_length);
}
error = zilog->zl_get_data(
itx->itx_private, lrw, dbuf, lwb->lwb_zio);
txg_wait_synced(zilog->zl_dmu_pool, txg);
return (lwb);
}
- if (error) {
+ if (error != 0) {
ASSERT(error == ENOENT || error == EEXIST ||
error == EALREADY);
return (lwb);
lwb->lwb_nused += reclen + dlen;
lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
- ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);
+ ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
return (lwb);
}
itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
itx->itx_lr.lrc_seq = 0; /* defensive */
itx->itx_sync = B_TRUE; /* default is synchronous */
+ itx->itx_callback = NULL;
+ itx->itx_callback_data = NULL;
return (itx);
}
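+
+/*
+ * Illustrative only: a caller that wants to be notified when its itx has
+ * been freed (after commit or clean) might fill in the new callback fields
+ * like this, where my_itx_done() and my_arg are hypothetical:
+ *
+ *	itx = zil_itx_create(txtype, lrsize);
+ *	itx->itx_callback = my_itx_done;
+ *	itx->itx_callback_data = my_arg;
+ *	zil_itx_assign(zilog, itx, tx);
+ */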
list = &itxs->i_sync_list;
while ((itx = list_head(list)) != NULL) {
+ if (itx->itx_callback != NULL)
+ itx->itx_callback(itx->itx_callback_data);
list_remove(list, itx);
kmem_free(itx, offsetof(itx_t, itx_lr) +
itx->itx_lr.lrc_reclen);
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list = &ian->ia_list;
while ((itx = list_head(list)) != NULL) {
+ if (itx->itx_callback != NULL)
+ itx->itx_callback(itx->itx_callback_data);
list_remove(list, itx);
kmem_free(itx, offsetof(itx_t, itx_lr) +
itx->itx_lr.lrc_reclen);
mutex_exit(&itxg->itxg_lock);
}
while ((itx = list_head(&clean_list)) != NULL) {
+ if (itx->itx_callback != NULL)
+ itx->itx_callback(itx->itx_callback_data);
list_remove(&clean_list, itx);
kmem_free(itx, offsetof(itx_t, itx_lr) +
itx->itx_lr.lrc_reclen);
if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
zil_async_to_sync(zilog, itx->itx_oid);
- if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
+ if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
txg = ZILTEST_TXG;
else
txg = dmu_tx_get_txg(tx);
}
ASSERT(itxg->itxg_sod == 0);
itxg->itxg_txg = txg;
- itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_PUSHPAGE);
+ itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
+ KM_PUSHPAGE);
list_create(&itxs->i_sync_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
ian = avl_find(t, &foid, &where);
if (ian == NULL) {
- ian = kmem_alloc(sizeof (itx_async_node_t), KM_PUSHPAGE);
+ ian = kmem_alloc(sizeof (itx_async_node_t),
+ KM_PUSHPAGE);
list_create(&ian->ia_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
ian->ia_foid = foid;
}
itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
+ zilog_dirty(zilog, txg);
mutex_exit(&itxg->itxg_lock);
/* Release the old itxs now we've dropped the lock */
/*
* If there are any in-memory intent log transactions which have now been
- * synced then start up a taskq to free them.
+ * synced then start up a taskq to free them. We should only do this after we
+ * have written out the uberblocks (i.e. txg has been committed) so that we
+ * don't inadvertently clean out in-memory log records that would be required
+ * by zil_commit().
*/
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
}
DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
- while ((itx = list_head(&zilog->zl_itx_commit_list))) {
+ for (itx = list_head(&zilog->zl_itx_commit_list); itx != NULL;
+ itx = list_next(&zilog->zl_itx_commit_list, itx)) {
txg = itx->itx_lr.lrc_txg;
ASSERT(txg);
if (txg > spa_last_synced_txg(spa) || txg > spa_freeze_txg(spa))
lwb = zil_lwb_commit(zilog, itx, lwb);
- list_remove(&zilog->zl_itx_commit_list, itx);
- kmem_free(itx, offsetof(itx_t, itx_lr)
- + itx->itx_lr.lrc_reclen);
}
DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
if (error || lwb == NULL)
txg_wait_synced(zilog->zl_dmu_pool, 0);
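+
+	/*
+	 * Now that the itxs in the commit list have been written out (or,
+	 * on error, we have fallen back to txg_wait_synced() above), run
+	 * each itx's callback and free it; previously each itx was freed
+	 * inline as it was committed in the loop above.
+	 */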
+ while ((itx = list_head(&zilog->zl_itx_commit_list))) {
+ txg = itx->itx_lr.lrc_txg;
+ ASSERT(txg);
+
+ if (itx->itx_callback != NULL)
+ itx->itx_callback(itx->itx_callback_data);
+ list_remove(&zilog->zl_itx_commit_list, itx);
+ kmem_free(itx, offsetof(itx_t, itx_lr)
+ + itx->itx_lr.lrc_reclen);
+ }
+
mutex_enter(&zilog->zl_lock);
/*
sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
zil_ksp = kstat_create("zfs", 0, "zil", "misc",
- KSTAT_TYPE_NAMED, sizeof(zil_stats) / sizeof(kstat_named_t),
+ KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (zil_ksp != NULL) {
zilog->zl_stop_sync = 1;
+ ASSERT0(zilog->zl_suspend);
+ ASSERT0(zilog->zl_suspending);
+
ASSERT(list_is_empty(&zilog->zl_lwb_list));
list_destroy(&zilog->zl_lwb_list);
mutex_exit(&zilog->zl_lock);
if (txg)
txg_wait_synced(zilog->zl_dmu_pool, txg);
+ ASSERT(!zilog_is_dirty(zilog));
taskq_destroy(zilog->zl_clean_taskq);
zilog->zl_clean_taskq = NULL;
mutex_exit(&zilog->zl_lock);
}
+static char *suspend_tag = "zil suspending";
+
/*
* Suspend an intent log. While in suspended mode, we still honor
* synchronous semantics, but we rely on txg_wait_synced() to do it.
- * We suspend the log briefly when taking a snapshot so that the snapshot
- * contains all the data it's supposed to, and has an empty intent log.
+ * On old version pools, we suspend the log briefly when taking a
+ * snapshot so that it will have an empty intent log.
+ *
+ * Long holds are not really intended to be used the way we do here --
+ * held for such a short time. A concurrent caller of dsl_dataset_long_held()
+ * could fail. Therefore we take pains to only put a long hold if it is
+ * actually necessary. Fortunately, it will only be necessary if the
+ * objset is currently mounted (or the ZVOL equivalent). In that case it
+ * will already have a long hold, so we are not really making things any worse.
+ *
+ * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
+ * zvol_state_t), and use their mechanism to prevent their hold from being
+ * dropped (e.g. VFS_HOLD()). However, that would be even more pain for
+ * very little gain.
+ *
+ * If cookiep == NULL, this does both the suspend & resume.
+ * Otherwise, it returns with the dataset "long held", and the cookie
+ * should be passed into zil_resume().
*/
int
-zil_suspend(zilog_t *zilog)
+zil_suspend(const char *osname, void **cookiep)
{
- const zil_header_t *zh = zilog->zl_header;
+ objset_t *os;
+ zilog_t *zilog;
+ const zil_header_t *zh;
+ int error;
+
+ error = dmu_objset_hold(osname, suspend_tag, &os);
+ if (error != 0)
+ return (error);
+ zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
+ zh = zilog->zl_header;
+
if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
mutex_exit(&zilog->zl_lock);
- return (EBUSY);
+ dmu_objset_rele(os, suspend_tag);
+ return (SET_ERROR(EBUSY));
}
- if (zilog->zl_suspend++ != 0) {
+
+ /*
+ * Don't put a long hold in the cases where we can avoid it. This
+ * is when there is no cookie, so we are doing a suspend & resume
+ * (i.e. called from zil_vdev_offline()), and there's nothing to do
+ * for the suspend: either it's already suspended, or there's no ZIL.
+ */
+ if (cookiep == NULL && !zilog->zl_suspending &&
+ (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
+ mutex_exit(&zilog->zl_lock);
+ dmu_objset_rele(os, suspend_tag);
+ return (0);
+ }
+
+ dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
+ dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
+
+ zilog->zl_suspend++;
+
+ if (zilog->zl_suspend > 1) {
/*
- * Someone else already began a suspend.
+ * Someone else is already suspending it.
* Just wait for them to finish.
*/
+
while (zilog->zl_suspending)
cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
mutex_exit(&zilog->zl_lock);
+
+ if (cookiep == NULL)
+ zil_resume(os);
+ else
+ *cookiep = os;
return (0);
}
+
+ /*
+ * If there is no pointer to an on-disk block, this ZIL must not
+ * be active (e.g. filesystem not mounted), so there's nothing
+ * to clean up.
+ */
+ if (BP_IS_HOLE(&zh->zh_log)) {
+ ASSERT(cookiep != NULL); /* fast path already handled */
+
+ *cookiep = os;
+ mutex_exit(&zilog->zl_lock);
+ return (0);
+ }
+
zilog->zl_suspending = B_TRUE;
mutex_exit(&zilog->zl_lock);
cv_broadcast(&zilog->zl_cv_suspend);
mutex_exit(&zilog->zl_lock);
+ if (cookiep == NULL)
+ zil_resume(os);
+ else
+ *cookiep = os;
return (0);
}
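+
+/*
+ * Illustrative only: a typical caller of the cookie-based interface (the
+ * variable names are hypothetical):
+ *
+ *	void *cookie;
+ *
+ *	error = zil_suspend(osname, &cookie);
+ *	if (error == 0) {
+ *		... operate on the quiesced dataset ...
+ *		zil_resume(cookie);
+ *	}
+ *
+ * Passing cookiep == NULL makes zil_suspend() resume the log itself before
+ * returning, as zil_vdev_offline() does below.
+ */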
void
-zil_resume(zilog_t *zilog)
+zil_resume(void *cookie)
{
+ objset_t *os = cookie;
+ zilog_t *zilog = dmu_objset_zil(os);
+
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_suspend != 0);
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
+ dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
+ dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}
typedef struct zil_replay_arg {
- zil_replay_func_t **zr_replay;
+ zil_replay_func_t *zr_replay;
void *zr_arg;
boolean_t zr_byteswap;
char *zr_lr;
if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
error = zil_read_log_data(zilog, (lr_write_t *)lr,
zr->zr_lr + reclen);
- if (error)
+ if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
* is updated if we are in replay mode.
*/
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
- if (error) {
+ if (error != 0) {
/*
* The DMU's dnode layer doesn't see removes until the txg
* commits, so a subsequent claim can spuriously fail with
*/
txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
- if (error)
+ if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
return (0);
* If this dataset has a non-empty intent log, replay it and destroy it.
*/
void
-zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
+zil_replay(objset_t *os, void *arg, zil_replay_func_t replay_func[TX_MAX_TYPE])
{
zilog_t *zilog = dmu_objset_zil(os);
const zil_header_t *zh = zilog->zl_header;
int
zil_vdev_offline(const char *osname, void *arg)
{
- objset_t *os;
- zilog_t *zilog;
int error;
- error = dmu_objset_hold(osname, FTAG, &os);
- if (error)
- return (error);
-
- zilog = dmu_objset_zil(os);
- if (zil_suspend(zilog) != 0)
- error = EEXIST;
- else
- zil_resume(zilog);
- dmu_objset_rele(os, FTAG);
- return (error);
+ error = zil_suspend(osname, NULL);
+ if (error != 0)
+ return (SET_ERROR(EEXIST));
+ return (0);
}
#if defined(_KERNEL) && defined(HAVE_SPL)