/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>
#include <sys/trace_zfs.h>
/*
 * ZFS Transaction Groups
 * ----------------------
 *
 * ZFS transaction groups are, as the name implies, groups of transactions
 * that act on persistent state. ZFS asserts consistency at the granularity of
 * these transaction groups. Each successive transaction group (txg) is
 * assigned a 64-bit consecutive identifier. There are three active
 * transaction group states: open, quiescing, or syncing. At any given time,
 * there may be an active txg associated with each state; each active txg may
 * either be processing, or blocked waiting to enter the next state. There may
 * be up to three active txgs, and there is always a txg in the open state
 * (though it may be blocked waiting to enter the quiescing state). In broad
 * strokes, transactions -- operations that change in-memory structures -- are
 * accepted into the txg in the open state, and are completed while the txg is
 * in the open or quiescing states. The accumulated changes are written to
 * disk in the syncing state.
 *
 * Open
 * ----
 * When a new txg becomes active, it first enters the open state. New
 * transactions -- updates to in-memory structures -- are assigned to the
 * currently open txg. There is always a txg in the open state so that ZFS can
 * accept new changes (though the txg may refuse new changes if it has hit
 * some limit). ZFS advances the open txg to the next state for a variety of
 * reasons, such as it hitting a time or size threshold, or the execution of
 * an administrative action that must be completed in the syncing state.
 *
 * Quiesce
 * -------
 * After a txg exits the open state, it enters the quiescing state. The
 * quiescing state is intended to provide a buffer between accepting new
 * transactions in the open state and writing them out to stable storage in
 * the syncing state. While quiescing, transactions can continue their
 * operation without delaying either of the other states. Typically, a txg is
 * in the quiescing state very briefly since the operations are bounded by
 * software latencies rather than, say, slower I/O latencies. After all
 * transactions complete, the txg is ready to enter the next state.
 *
 * Sync
 * ----
 * In the syncing state, the in-memory state built up during the open and (to
 * a lesser degree) the quiescing states is written to stable storage. The
 * process of writing out modified data can, in turn, modify more data. For
 * example, when we write new blocks, we need to allocate space for them;
 * those allocations modify metadata (space maps)... which themselves must be
 * written to stable storage. During the sync state, ZFS iterates, writing out
 * data until it converges and all in-memory changes have been written out.
 * The first such pass is the largest as it encompasses all the modified user
 * data (as opposed to filesystem metadata). Subsequent passes typically have
 * far less data to write as they consist exclusively of filesystem metadata.
 *
 * To ensure convergence, after a certain number of passes ZFS begins
 * overwriting locations on stable storage that had been allocated earlier in
 * the syncing state (and subsequently freed). ZFS usually allocates new
 * blocks to optimize for large, continuous writes. For the syncing state to
 * converge, however, it must complete a pass where no new blocks are
 * allocated, since each allocation requires a modification of persistent
 * metadata. Further, to hasten convergence, after a prescribed number of
 * passes, ZFS also defers frees, and stops compressing.
 *
 * In addition to writing out user data, we must also execute synctasks during
 * the syncing context. A synctask is the mechanism by which some
 * administrative activities work, such as creating and destroying snapshots
 * or datasets. Note that when a synctask is initiated it enters the open txg,
 * and ZFS then pushes that txg as quickly as possible to completion of the
 * syncing state in order to reduce the latency of the administrative
 * activity. To complete the syncing state, ZFS writes out a new uberblock,
 * the root of the tree of blocks that comprise all state stored on the ZFS
 * pool. Finally, if there is a quiesced txg waiting, we signal that it can
 * now transition to the syncing state.
 */
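
/*
 * As an illustrative sketch (not part of this file's machinery), a typical
 * DMU consumer drives a transaction through the states above roughly as
 * follows; "os", "object", "offset" and "size" are placeholders:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	error = dmu_tx_assign(tx, TXG_WAIT);	(joins the open txg)
 *	... modify the in-memory structures covered by the holds ...
 *	uint64_t txg = dmu_tx_get_txg(tx);
 *	dmu_tx_commit(tx);			(releases the txg hold)
 *	txg_wait_synced(dp, txg);		(optionally wait for sync)
 */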
static void txg_sync_thread(void *arg);
static void txg_quiesce_thread(void *arg);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */
/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_NOLOCKDEP,
		    NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}
/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT0(tx->tx_threads);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_open_lock);
		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}
/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT0(tx->tx_threads);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 0, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}
static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}
static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}
static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	/*
	 * cv_wait_sig() is used instead of cv_wait() in order to prevent
	 * this process from incorrectly contributing to the system load
	 * average when idle.
	 */
	if (time) {
		(void) cv_timedwait_sig(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	} else {
		cv_wait_sig(cv, &tx->tx_sync_lock);
	}

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT3U(tx->tx_threads, ==, 2);

	/*
	 * We need to ensure that we've vacated the deferred metaslab trees.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT3U(tx->tx_threads, ==, 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc;
	uint64_t txg;

	/*
	 * It appears the processor id is simply used as a "random"
	 * number to index into the array, and there isn't any other
	 * significance to the chosen tx_cpu.  Because... why not use
	 * the current cpu to index into the array?
	 */
	kpreempt_disable();
	tc = &tx->tx_cpu[CPU_SEQID];
	kpreempt_enable();

	mutex_enter(&tc->tc_open_lock);
	txg = tx->tx_open_txg;

	mutex_enter(&tc->tc_lock);
	tc->tc_count[txg & TXG_MASK]++;
	mutex_exit(&tc->tc_lock);

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}
void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	ASSERT(!MUTEX_HELD(&tc->tc_lock));
	mutex_exit(&tc->tc_open_lock);
}
void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}
void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}
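
/*
 * Taken together, the hold interfaces above are used by the DMU roughly as
 * in this illustrative sketch ("dp" is the pool, "th" is caller storage):
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);	(also enters tc_open_lock)
 *	txg_rele_to_quiesce(&th);		(drops tc_open_lock)
 *	... record in-memory changes against txg ...
 *	txg_rele_to_sync(&th);		(drops the count; quiesce may proceed)
 */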
/*
 * Blocks until all transactions in the group are committed.
 *
 * On return, the transaction group has reached a stable state in which it can
 * then be passed off to the syncing context.
 */
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	uint64_t tx_open_time;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tc_open_locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_open_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;
	tx->tx_open_time = tx_open_time = gethrtime();

	DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
	DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_open_lock);

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time);
	spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * txg_exit().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
}
static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}
/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 *
 * If no callbacks are registered for a given TXG, nothing happens.
 * This function creates a taskq for the associated pool, if needed.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/*
		 * No need to lock tx_cpu_t at this point, since this can
		 * only be called once a txg has been synced.
		 */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    max_ncpus, defclsyspri, max_ncpus, max_ncpus * 2,
			    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}
/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
}
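
/*
 * Illustrative sketch: commit callbacks normally reach tc_callbacks[] via
 * dmu_tx_callback_register() before the transaction is committed; "arg" is
 * a placeholder:
 *
 *	static void
 *	my_commit_cb(void *arg, int error)	(error is nonzero on abort)
 *	{
 *		...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, arg);
 *	dmu_tx_commit(tx);
 *	...
 *	txg_wait_callbacks(dp);		(never from within a callback)
 */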
static boolean_t
txg_is_syncing(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_syncing_txg != 0);
}

static boolean_t
txg_is_quiescing(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_quiescing_txg != 0);
}

static boolean_t
txg_has_quiesced_to_sync(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_quiesced_txg != 0);
}
static void
txg_sync_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	clock_t start, delta;

	(void) spl_fstrans_mark();
	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		clock_t timeout = zfs_txg_timeout * hz;
		clock_t timer;
		uint64_t txg;
		uint64_t dirty_min_bytes =
		    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    !txg_has_quiesced_to_sync(dp) &&
		    dp->dp_dirty_total < dirty_min_bytes) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && !txg_has_quiesced_to_sync(dp)) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg + 1)
				tx->tx_quiesce_txg_waiting =
				    tx->tx_open_txg + 1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		ASSERT(tx->tx_quiesced_txg != 0);
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		txg_stat_t *ts = spa_txg_history_init_io(spa, txg, dp);
		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;
		spa_txg_history_fini_io(spa, ts);

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}
static void
txg_quiesce_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    txg_has_quiesced_to_sync(dp)))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		tx->tx_quiescing_txg = txg;

		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiescing_txg = 0;
		tx->tx_quiesced_txg = txg;
		DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}
/*
 * Delay this thread by delay nanoseconds if we are still in the open
 * transaction group and there is already a waiting txg quiescing or quiesced.
 * Abort the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
{
	tx_state_t *tx = &dp->dp_tx;
	hrtime_t start = gethrtime();

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg - 1 || tx->tx_synced_txg == txg - 1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg - 1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (gethrtime() - start < delay &&
	    tx->tx_syncing_txg < txg - 1 && !txg_stalled(dp)) {
		(void) cv_timedwait_hires(&tx->tx_quiesce_more_cv,
		    &tx->tx_sync_lock, delay, resolution, 0);
	}

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}
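
/*
 * Illustrative sketch: the write throttle (see dmu_tx_delay()) uses this
 * interface to slow a writer down in small increments; the values below are
 * placeholders, not the tuned production numbers:
 *
 *	txg_delay(dp, txg, MSEC2NSEC(10), MSEC2NSEC(10));
 *
 * which sleeps up to 10ms unless the txg quiesces or stalls first.
 */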
static boolean_t
txg_wait_synced_impl(dsl_pool_t *dp, uint64_t txg, boolean_t wait_sig)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT3U(tx->tx_threads, ==, 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%px\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		if (wait_sig) {
			/*
			 * Condition wait here but stop if the thread receives
			 * a signal.  The caller may call txg_wait_synced*()
			 * again to resume waiting for this txg.
			 */
			if (cv_wait_io_sig(&tx->tx_sync_done_cv,
			    &tx->tx_sync_lock) == 0) {
				mutex_exit(&tx->tx_sync_lock);
				return (B_TRUE);
			}
		} else {
			cv_wait_io(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
		}
	}
	mutex_exit(&tx->tx_sync_lock);
	return (B_FALSE);
}
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	VERIFY0(txg_wait_synced_impl(dp, txg, B_FALSE));
}
/*
 * Similar to txg_wait_synced but it can be interrupted by a signal.
 * Returns B_TRUE if the thread was signaled while waiting.
 */
boolean_t
txg_wait_synced_sig(dsl_pool_t *dp, uint64_t txg)
{
	return (txg_wait_synced_impl(dp, txg, B_TRUE));
}
/*
 * Wait for the specified open transaction group.  Set should_quiesce
 * when the current open txg should be quiesced immediately.
 */
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT3U(tx->tx_threads, ==, 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg && should_quiesce)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		/*
		 * Callers setting should_quiesce will use cv_wait_io() and
		 * be accounted for as iowait time.  Otherwise, the caller is
		 * understood to be idle and cv_wait_sig() is used to prevent
		 * incorrectly inflating the system load average.
		 */
		if (should_quiesce == B_TRUE) {
			cv_wait_io(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
		} else {
			cv_wait_sig(&tx->tx_quiesce_done_cv,
			    &tx->tx_sync_lock);
		}
	}
	mutex_exit(&tx->tx_sync_lock);
}
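
/*
 * Illustrative sketch: a caller that could not fit its transaction into the
 * currently open txg might force that txg out and wait for the next one:
 *
 *	txg_wait_open(dp, open_txg + 1, B_TRUE);
 *
 * where "open_txg" stands for the txg the caller was assigned to; passing
 * txg == 0 waits for the currently open txg to close and a new one to open.
 */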
/*
 * If there isn't a txg syncing or in the pipeline, push another txg through
 * the pipeline by quiescing the open txg.
 */
void
txg_kick(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	if (!txg_is_syncing(dp) &&
	    !txg_is_quiescing(dp) &&
	    tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
	    tx->tx_sync_txg_waiting <= tx->tx_synced_txg &&
	    tx->tx_quiesced_txg <= tx->tx_synced_txg) {
		tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
		cv_broadcast(&tx->tx_quiesce_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);
}
boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}
boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}
/*
 * Verify that this txg is active (open, quiescing, syncing).  Non-active
 * txg's should not be manipulated.
 */
#ifdef ZFS_DEBUG
void
txg_verify(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp __maybe_unused = spa_get_dsl(spa);
	if (txg <= TXG_INITIAL || txg == ZILTEST_TXG)
		return;
	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_synced_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_open_txg - TXG_CONCURRENT_STATES);
}
#endif
/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, spa_t *spa, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;
	tl->tl_spa = spa;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}
static boolean_t
txg_list_empty_impl(txg_list_t *tl, uint64_t txg)
{
	ASSERT(MUTEX_HELD(&tl->tl_lock));
	TXG_VERIFY(tl->tl_spa, txg);
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	mutex_enter(&tl->tl_lock);
	boolean_t ret = txg_list_empty_impl(tl, txg);
	mutex_exit(&tl->tl_lock);

	return (ret);
}
void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	mutex_enter(&tl->tl_lock);
	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty_impl(tl, t));
	mutex_exit(&tl->tl_lock);

	mutex_destroy(&tl->tl_lock);
}
/*
 * Returns true if all txg lists are empty.
 *
 * Warning: this is inherently racy (an item could be added immediately
 * after this function returns).
 */
boolean_t
txg_all_lists_empty(txg_list_t *tl)
{
	mutex_enter(&tl->tl_lock);
	for (int i = 0; i < TXG_SIZE; i++) {
		if (!txg_list_empty_impl(tl, i)) {
			mutex_exit(&tl->tl_lock);
			return (B_FALSE);
		}
	}
	mutex_exit(&tl->tl_lock);

	return (B_TRUE);
}
/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}
/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL;
		    tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}
/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		ASSERT(tn->tn_member[t]);
		ASSERT(tn->tn_next[t] == NULL || tn->tn_next[t]->tn_member[t]);
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}
/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}
boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	TXG_VERIFY(tl->tl_spa, txg);
	return (tn->tn_member[t] != 0);
}
/*
 * Walk a txg list
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;

	mutex_enter(&tl->tl_lock);
	tn = tl->tl_head[t];
	mutex_exit(&tl->tl_lock);

	TXG_VERIFY(tl->tl_spa, txg);
	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	TXG_VERIFY(tl->tl_spa, txg);

	mutex_enter(&tl->tl_lock);
	tn = tn->tn_next[t];
	mutex_exit(&tl->tl_lock);

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);

ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, timeout, INT, ZMOD_RW,
	"Max seconds worth of delta per txg");
#endif