/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Portions Copyright 2011 Martin Matuska
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>
+#include <sys/trace_txg.h>
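+/*
+ * An inference, not stated in this diff: trace_txg.h supplies the
+ * Linux tracepoint definitions that back the DTRACE_PROBE* calls in
+ * this file when building against the SPL.
+ */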
/*
* ZFS Transaction Groups
tx->tx_threads = 2;
tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
- dp, 0, &p0, TS_RUN, minclsyspri);
+ dp, 0, &p0, TS_RUN, defclsyspri);
/*
 * The sync thread can need a larger-than-default stack size,
 * due in part to scrub_visitbp() recursion.
*/
tx->tx_sync_thread = thread_create(NULL, 32<<10, txg_sync_thread,
- dp, 0, &p0, TS_RUN, minclsyspri);
+ dp, 0, &p0, TS_RUN, defclsyspri);
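+/*
+ * Both txg threads now run at defclsyspri rather than minclsyspri,
+ * presumably because under the Linux SPL these priorities map to
+ * nice values and minclsyspri would leave the quiesce and sync
+ * threads at the lowest priority (the diff gives no rationale).
+ */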
mutex_exit(&tx->tx_sync_lock);
}
CALLB_CPR_SAFE_BEGIN(cpr);
if (time)
- (void) cv_timedwait_interruptible(cv, &tx->tx_sync_lock,
+ (void) cv_timedwait_sig(cv, &tx->tx_sync_lock,
ddi_get_lbolt() + time);
else
- cv_wait_interruptible(cv, &tx->tx_sync_lock);
+ cv_wait_sig(cv, &tx->tx_sync_lock);
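+/*
+ * cv_timedwait_sig()/cv_wait_sig() are the illumos-style names for
+ * signal-interruptible waits; behavior matches the *_interruptible
+ * variants they replace (the wait returns early if a signal is
+ * pending).
+ */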
CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
tx->tx_open_txg++;
tx->tx_open_time = gethrtime();
- spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, gethrtime());
- spa_txg_history_add(dp->dp_spa, tx->tx_open_txg);
+ spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx->tx_open_time);
+ spa_txg_history_add(dp->dp_spa, tx->tx_open_txg, tx->tx_open_time);
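+/*
+ * Reusing tx->tx_open_time avoids a second gethrtime() call and
+ * guarantees the TXG_STATE_OPEN timestamp recorded in the txg
+ * history matches the open time stored in the tx_state.
+ */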
DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);
* Commit callback taskq hasn't been created yet.
*/
tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
- 100, minclsyspri, max_ncpus, INT_MAX,
- TASKQ_THREADS_CPU_PCT | TASKQ_PREPOPULATE);
+ max_ncpus, defclsyspri, max_ncpus, max_ncpus * 2,
+ TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
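+/*
+ * taskq_create(name, nthreads, pri, minalloc, maxalloc, flags): the
+ * commit-callback taskq is now capped at max_ncpus threads, created
+ * on demand via TASKQ_DYNAMIC, with a bounded entry cache of
+ * max_ncpus * 2, rather than prepopulating one thread per CPU
+ * (TASKQ_THREADS_CPU_PCT at 100) with an unbounded INT_MAX maxalloc.
+ */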
}
- cb_list = kmem_alloc(sizeof (list_t), KM_PUSHPAGE);
+ cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
list_create(cb_list, sizeof (dmu_tx_callback_t),
offsetof(dmu_tx_callback_t, dcb_node));
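+/*
+ * The KM_SLEEP allocation above is safe because this path runs in
+ * the sync thread, which marks itself with spl_fstrans_mark() (see
+ * txg_sync_thread below) so reclaim cannot re-enter the filesystem;
+ * presumably the reason KM_PUSHPAGE was dropped.
+ */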
tx_state_t *tx = &dp->dp_tx;
if (tx->tx_commit_cb_taskq != NULL)
- taskq_wait(tx->tx_commit_cb_taskq);
+ taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
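+/*
+ * taskq_wait_outstanding(tq, 0) waits only for tasks already
+ * dispatched at the time of the call, whereas taskq_wait() would
+ * also block on callbacks dispatched while waiting.
+ */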
}
static void
tx_state_t *tx = &dp->dp_tx;
callb_cpr_t cpr;
vdev_stat_t *vs1, *vs2;
- uint64_t start, delta;
-
-#ifdef _KERNEL
- /*
- * Annotate this process with a flag that indicates that it is
- * unsafe to use KM_SLEEP during memory allocations due to the
- * potential for a deadlock. KM_PUSHPAGE should be used instead.
- */
- current->flags |= PF_NOFS;
-#endif /* _KERNEL */
+ clock_t start, delta;
+ (void) spl_fstrans_mark();
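+/*
+ * spl_fstrans_mark() flags this thread so that memory reclaim
+ * triggered by its allocations cannot re-enter the filesystem,
+ * which is what makes the KM_SLEEP allocations below safe and
+ * replaces the PF_NOFS/KM_PUSHPAGE convention removed above. The
+ * returned cookie is discarded because the sync thread is never
+ * unmarked. start and delta (and timer/timeout below) become
+ * clock_t to match the tick values returned by ddi_get_lbolt().
+ */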
txg_thread_enter(tx, &cpr);
- vs1 = kmem_alloc(sizeof(vdev_stat_t), KM_PUSHPAGE);
- vs2 = kmem_alloc(sizeof(vdev_stat_t), KM_PUSHPAGE);
+ vs1 = kmem_alloc(sizeof (vdev_stat_t), KM_SLEEP);
+ vs2 = kmem_alloc(sizeof (vdev_stat_t), KM_SLEEP);
start = delta = 0;
for (;;) {
- uint64_t timer, timeout;
+ clock_t timer, timeout;
uint64_t txg;
+ uint64_t ndirty;
timeout = zfs_txg_timeout * hz;
}
if (tx->tx_exiting) {
- kmem_free(vs2, sizeof(vdev_stat_t));
- kmem_free(vs1, sizeof(vdev_stat_t));
+ kmem_free(vs2, sizeof (vdev_stat_t));
+ kmem_free(vs1, sizeof (vdev_stat_t));
txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);
}
+ spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
vdev_get_stats(spa->spa_root_vdev, vs1);
+ spa_config_exit(spa, SCL_ALL, FTAG);
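+/*
+ * The SCL_ALL reader hold presumably keeps the vdev tree stable
+ * while vdev_get_stats() walks it; the same is done for the second
+ * sample taken after spa_sync() below.
+ */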
/*
* Consume the quiesced txg which has been handed off to
txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
mutex_exit(&tx->tx_sync_lock);
+ spa_txg_history_set(spa, txg, TXG_STATE_WAIT_FOR_SYNC,
+ gethrtime());
+ ndirty = dp->dp_dirty_pertxg[txg & TXG_MASK];
+
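+/*
+ * Sample the dirty byte count before spa_sync() runs:
+ * dp_dirty_pertxg[] is decremented as the data is written out, so
+ * reading it after the sync (as the old code did) under-reports
+ * ndirty in the txg history.
+ */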
start = ddi_get_lbolt();
spa_sync(spa, txg);
delta = ddi_get_lbolt() - start;
*/
txg_dispatch_callbacks(dp, txg);
+ spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
vdev_get_stats(spa->spa_root_vdev, vs2);
+ spa_config_exit(spa, SCL_ALL, FTAG);
spa_txg_history_set_io(spa, txg,
vs2->vs_bytes[ZIO_TYPE_READ]-vs1->vs_bytes[ZIO_TYPE_READ],
vs2->vs_bytes[ZIO_TYPE_WRITE]-vs1->vs_bytes[ZIO_TYPE_WRITE],
vs2->vs_ops[ZIO_TYPE_READ]-vs1->vs_ops[ZIO_TYPE_READ],
vs2->vs_ops[ZIO_TYPE_WRITE]-vs1->vs_ops[ZIO_TYPE_WRITE],
- dp->dp_dirty_pertxg[txg & TXG_MASK]);
+ ndirty);
spa_txg_history_set(spa, txg, TXG_STATE_SYNCED, gethrtime());
}
}
return (tl->tl_head[txg & TXG_MASK] == NULL);
}
+/*
+ * Returns true if all txg lists are empty.
+ *
+ * Warning: this is inherently racy (an item could be added immediately
+ * after this function returns). We don't bother with the lock because
+ * it wouldn't change the semantics.
+ */
+boolean_t
+txg_all_lists_empty(txg_list_t *tl)
+{
+ int i;
+
+ for (i = 0; i < TXG_SIZE; i++) {
+ if (!txg_list_empty(tl, i)) {
+ return (B_FALSE);
+ }
+ }
+ return (B_TRUE);
+}
+
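+/*
+ * Hypothetical usage sketch (not part of this diff): a sync-context
+ * caller could assert that nothing remains queued on any of a pool's
+ * dirty lists across all TXG_SIZE slots, e.g.:
+ *
+ *	ASSERT(txg_all_lists_empty(&dp->dp_dirty_datasets));
+ */
+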
/*
* Add an entry to the list (unless it's already on the list).
* Returns B_TRUE if it was actually added.