 * Verify that this txg is active (open, quiescing, syncing). Non-active
 * txgs should not be manipulated.
*/
+#ifdef ZFS_DEBUG
void
txg_verify(spa_t *spa, uint64_t txg)
{
ASSERT3U(txg, >=, dp->dp_tx.tx_synced_txg);
ASSERT3U(txg, >=, dp->dp_tx.tx_open_txg - TXG_CONCURRENT_STATES);
}
+#endif
/*
* Per-txg object lists.
tl->tl_head[t] = NULL;
}
+static boolean_t
+txg_list_empty_impl(txg_list_t *tl, uint64_t txg)
+{
+ ASSERT(MUTEX_HELD(&tl->tl_lock));
+ TXG_VERIFY(tl->tl_spa, txg);
+ return (tl->tl_head[txg & TXG_MASK] == NULL);
+}
+
+boolean_t
+txg_list_empty(txg_list_t *tl, uint64_t txg)
+{
+ mutex_enter(&tl->tl_lock);
+ boolean_t ret = txg_list_empty_impl(tl, txg);
+ mutex_exit(&tl->tl_lock);
+
+ return (ret);
+}
+
/*
 * Tear down a txg list. Every per-txg sublist must already be empty;
 * the lock is taken so the emptiness assertions see a stable view.
 */
void
txg_list_destroy(txg_list_t *tl)
{
	mutex_enter(&tl->tl_lock);
	for (int t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty_impl(tl, t));
	mutex_exit(&tl->tl_lock);

	mutex_destroy(&tl->tl_lock);
}
-boolean_t
-txg_list_empty(txg_list_t *tl, uint64_t txg)
-{
- txg_verify(tl->tl_spa, txg);
- return (tl->tl_head[txg & TXG_MASK] == NULL);
-}
-
/*
 * Returns true if all txg lists are empty.
 *
 * Warning: this is inherently racy (an item could be added immediately
 * after this function returns).
 */
boolean_t
txg_all_lists_empty(txg_list_t *tl)
{
	boolean_t all_empty = B_TRUE;

	mutex_enter(&tl->tl_lock);
	/* Stop scanning as soon as one non-empty sublist is found. */
	for (int i = 0; all_empty && i < TXG_SIZE; i++)
		all_empty = txg_list_empty_impl(tl, i);
	mutex_exit(&tl->tl_lock);

	return (all_empty);
}
txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
boolean_t add;
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
mutex_enter(&tl->tl_lock);
add = (tn->tn_member[t] == 0);
if (add) {
txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
boolean_t add;
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
mutex_enter(&tl->tl_lock);
add = (tn->tn_member[t] == 0);
if (add) {
txg_node_t *tn;
void *p = NULL;
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
mutex_enter(&tl->tl_lock);
if ((tn = tl->tl_head[t]) != NULL) {
ASSERT(tn->tn_member[t]);
int t = txg & TXG_MASK;
txg_node_t *tn, **tp;
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
mutex_enter(&tl->tl_lock);
for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
int t = txg & TXG_MASK;
txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
return (tn->tn_member[t] != 0);
}
/*
 * Walk a txg list.
 *
 * Return the object at the head of the list for the given txg, or
 * NULL if that list is empty. The lock is held only long enough to
 * take a consistent snapshot of the head pointer.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	txg_node_t *head;
	int t = txg & TXG_MASK;

	mutex_enter(&tl->tl_lock);
	head = tl->tl_head[t];
	mutex_exit(&tl->tl_lock);

	TXG_VERIFY(tl->tl_spa, txg);
	if (head == NULL)
		return (NULL);
	/* Convert the embedded txg_node_t back to the containing object. */
	return ((char *)head - tl->tl_offset);
}
int t = txg & TXG_MASK;
txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
+
+ mutex_enter(&tl->tl_lock);
tn = tn->tn_next[t];
+ mutex_exit(&tl->tl_lock);
return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}