/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>
#include <sys/trace_txg.h>

/*
 * ZFS Transaction Groups
 * ----------------------
 *
 * ZFS transaction groups are, as the name implies, groups of transactions
 * that act on persistent state. ZFS asserts consistency at the granularity of
 * these transaction groups. Each successive transaction group (txg) is
 * assigned a 64-bit consecutive identifier. There are three active
 * transaction group states: open, quiescing, or syncing. At any given time,
 * there may be an active txg associated with each state; each active txg may
 * either be processing, or blocked waiting to enter the next state. There may
 * be up to three active txgs, and there is always a txg in the open state
 * (though it may be blocked waiting to enter the quiescing state). In broad
 * strokes, transactions -- operations that change in-memory structures -- are
 * accepted into the txg in the open state, and are completed while the txg is
 * in the open or quiescing states. The accumulated changes are written to
 * disk in the syncing state.
 *
 * Open
 *
 * When a new txg becomes active, it first enters the open state. New
 * transactions -- updates to in-memory structures -- are assigned to the
 * currently open txg. There is always a txg in the open state so that ZFS can
 * accept new changes (though the txg may refuse new changes if it has hit
 * some limit). ZFS advances the open txg to the next state for a variety of
 * reasons, such as hitting a time or size threshold, or the execution of an
 * administrative action that must be completed in the syncing state.
 *
 * Quiescing
 *
 * After a txg exits the open state, it enters the quiescing state. The
 * quiescing state is intended to provide a buffer between accepting new
 * transactions in the open state and writing them out to stable storage in
 * the syncing state. While quiescing, transactions can continue their
 * operation without delaying either of the other states. Typically, a txg is
 * in the quiescing state very briefly since the operations are bounded by
 * software latencies rather than, say, slower I/O latencies. After all
 * transactions complete, the txg is ready to enter the next state.
 *
 * Syncing
 *
 * In the syncing state, the in-memory state built up during the open and (to
 * a lesser degree) the quiescing states is written to stable storage. The
 * process of writing out modified data can, in turn, modify more data. For
 * example, when we write new blocks, we need to allocate space for them; those
 * allocations modify metadata (space maps)... which themselves must be
 * written to stable storage. During the sync state, ZFS iterates, writing out
 * data until it converges and all in-memory changes have been written out.
 * The first such pass is the largest as it encompasses all the modified user
 * data (as opposed to filesystem metadata). Subsequent passes typically have
 * far less data to write as they consist exclusively of filesystem metadata.
 *
 * To ensure convergence, after a certain number of passes ZFS begins
 * overwriting locations on stable storage that had been allocated earlier in
 * the syncing state (and subsequently freed). ZFS usually allocates new
 * blocks to optimize for large, continuous writes. For the syncing state to
 * converge, however, it must complete a pass where no new blocks are allocated
 * since each allocation requires a modification of persistent metadata.
 * Further, to hasten convergence, after a prescribed number of passes, ZFS
 * also defers frees, and stops compressing.
 *
 * In addition to writing out user data, we must also execute synctasks during
 * the syncing context. A synctask is the mechanism by which some
 * administrative activities work such as creating and destroying snapshots or
 * datasets. Note that when a synctask is initiated it enters the open txg,
 * and ZFS then pushes that txg as quickly as possible to completion of the
 * syncing state in order to reduce the latency of the administrative
 * activity. To complete the syncing state, ZFS writes out a new uberblock,
 * the root of the tree of blocks that comprise all state stored on the ZFS
 * pool. Finally, if there is a quiesced txg waiting, we signal that it can
 * now transition to the syncing state.
 */

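/*
 * Illustrative sketch (not part of the original file): a typical DMU
 * consumer drives this state machine indirectly.  It joins the currently
 * open txg via dmu_tx_assign(), applies its in-memory changes, and commits;
 * the quiesce and sync threads below then push that txg to stable storage.
 * The objset "os", "object", "offset", "size", and "buf" are assumed names
 * for illustration only:
 *
 *	dmu_tx_t *tx;
 *	uint64_t txg;
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));		// join the open txg
 *	dmu_write(os, object, offset, size, buf, tx);	// open-state change
 *	txg = dmu_tx_get_txg(tx);
 *	dmu_tx_commit(tx);				// release the txg hold
 *	txg_wait_synced(dmu_objset_pool(os), txg);	// optional: durability
 */
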
static void txg_sync_thread(void *dp);
static void txg_quiesce_thread(void *dp);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_NOLOCKDEP,
		    NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_open_lock);
		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 0, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait_sig(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	else
		cv_wait_sig(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);

	/*
	 * We need to ensure that we've vacated the deferred space_maps.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc;
	uint64_t txg;

	/*
	 * It appears the processor id is simply used as a "random"
	 * number to index into the array, and there isn't any other
	 * significance to the chosen tx_cpu.  So why not use the
	 * current cpu to index into the array?
	 */
	tc = &tx->tx_cpu[CPU_SEQID];

	mutex_enter(&tc->tc_open_lock);
	txg = tx->tx_open_txg;

	mutex_enter(&tc->tc_lock);
	tc->tc_count[txg & TXG_MASK]++;
	mutex_exit(&tc->tc_lock);

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	ASSERT(!MUTEX_HELD(&tc->tc_lock));
	mutex_exit(&tc->tc_open_lock);
}

void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}

void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}

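/*
 * Sketch of the hold protocol above (mirroring what dmu_tx_assign() and
 * dmu_tx_commit() do internally; shown here for illustration only).  A
 * writer pins the open txg, drops the per-cpu open lock so the txg can
 * advance, makes its in-memory changes, then drops its count so
 * txg_quiesce() can observe the group going idle:
 *
 *	txg_handle_t th;
 *	uint64_t txg;
 *
 *	txg = txg_hold_open(dp, &th);	// bump tc_count, take tc_open_lock
 *	txg_rele_to_quiesce(&th);	// drop tc_open_lock; txg may now close
 *	// ... record in-memory changes against txg ...
 *	txg_rele_to_sync(&th);		// drop tc_count; may wake txg_quiesce()
 */
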
/*
 * Blocks until all transactions in the group are committed.
 *
 * On return, the transaction group has reached a stable state in which it can
 * then be passed off to the syncing context.
 */
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	uint64_t tx_open_time;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tc_open_locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_open_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;
	tx->tx_open_time = tx_open_time = gethrtime();

	DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
	DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_open_lock);

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time);
	spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * call txg_rele_to_sync().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
}

static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 *
 * If no callbacks are registered for a given TXG, nothing happens.
 * This function creates a taskq for the associated pool, if needed.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/*
		 * No need to lock tx_cpu_t at this point, since this can
		 * only be called once a txg has been synced.
		 */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    max_ncpus, defclsyspri, max_ncpus, max_ncpus * 2,
			    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}

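/*
 * Example (assumed callback name, for illustration): entries land on
 * tc_callbacks[] when a transaction registers a commit callback and is
 * committed; after the txg syncs, txg_dispatch_callbacks() invokes them
 * from the "tx_commit_cb" taskq with error == 0.  If the txg never
 * reaches stable storage, the callbacks are invoked with an error
 * (e.g. ECANCELED) instead:
 *
 *	static void
 *	my_commit_cb(void *arg, int error)	// hypothetical callback
 *	{
 *		// error == 0: the transaction's data is on stable storage
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, my_arg);
 *	dmu_tx_commit(tx);
 */
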
/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
}

static void
txg_sync_thread(void *arg)
{
	dsl_pool_t *dp = (dsl_pool_t *)arg;
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	clock_t start, delta;

	(void) spl_fstrans_mark();
	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		clock_t timeout = zfs_txg_timeout * hz;
		clock_t timer;
		uint64_t txg;
		txg_stat_t *ts;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0 &&
		    dp->dp_dirty_total < zfs_dirty_data_sync) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
		ts = spa_txg_history_init_io(spa, txg, dp);
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
		spa_txg_history_fini_io(spa, ts);
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}

static void
txg_quiesce_thread(void *arg)
{
	dsl_pool_t *dp = (dsl_pool_t *)arg;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

/*
 * Delay this thread by delay nanoseconds if we are still in the open
 * transaction group and there is already a waiting txg quiescing or quiesced.
 * Abort the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
{
	tx_state_t *tx = &dp->dp_tx;
	hrtime_t start = gethrtime();

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (gethrtime() - start < delay &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) {
		(void) cv_timedwait_hires(&tx->tx_quiesce_more_cv,
		    &tx->tx_sync_lock, delay, resolution, 0);
	}

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}

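/*
 * Usage sketch (simplified; the real throttle policy lives in
 * dmu_tx_delay() in dmu_tx.c): when dirty data accumulates faster than
 * the sync thread retires it, writers are penalized before joining the
 * open txg.  The penalty and the 10ms resolution here are assumptions
 * for illustration:
 *
 *	hrtime_t penalty = ...;		// derived from dirty-data pressure
 *	txg_delay(dp, txg, penalty, MSEC2NSEC(10));
 */
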
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

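/*
 * Usage note: passing txg == 0 waits for everything currently open plus
 * the deferred-free window (tx_open_txg + TXG_DEFER_SIZE), which is how
 * callers force all outstanding changes onto stable storage:
 *
 *	txg_wait_synced(dp, 0);		// sync everything outstanding
 *	txg_wait_synced(dp, txg);	// or wait for one specific txg
 */
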
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

/*
 * If there isn't a txg syncing or in the pipeline, push another txg through
 * the pipeline by quiescing the open txg.
 */
void
txg_kick(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_syncing_txg == 0 &&
	    tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
	    tx->tx_sync_txg_waiting <= tx->tx_synced_txg &&
	    tx->tx_quiesced_txg <= tx->tx_synced_txg) {
		tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
		cv_broadcast(&tx->tx_quiesce_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);
}

boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Verify that this txg is active (open, quiescing, syncing).  Non-active
 * txgs should not be manipulated.
 */
void
txg_verify(spa_t *spa, uint64_t txg)
{
	ASSERTV(dsl_pool_t *dp = spa_get_dsl(spa));
	if (txg <= TXG_INITIAL || txg == ZILTEST_TXG)
		return;
	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_synced_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_open_txg - TXG_CONCURRENT_STATES);
}

/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, spa_t *spa, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;
	tl->tl_spa = spa;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

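/*
 * Example (hypothetical structure, for illustration): clients embed a
 * txg_node_t in their own objects and pass its offset at creation time;
 * the list code uses that offset to convert between objects and nodes:
 *
 *	typedef struct my_obj {
 *		...				// client fields
 *		txg_node_t mo_dirty_link;	// hypothetical link member
 *	} my_obj_t;
 *
 *	txg_list_create(&tl, spa, offsetof(my_obj_t, mo_dirty_link));
 *	(void) txg_list_add(&tl, obj, txg);	// queue obj for this txg
 *	while ((obj = txg_list_remove(&tl, txg)) != NULL)
 *		// ... process obj in syncing context ...
 */
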
void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}

boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	txg_verify(tl->tl_spa, txg);
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

/*
 * Returns true if all txg lists are empty.
 *
 * Warning: this is inherently racy (an item could be added immediately
 * after this function returns).  We don't bother with the lock because
 * it wouldn't change the semantics.
 */
boolean_t
txg_all_lists_empty(txg_list_t *tl)
{
	int i;

	for (i = 0; i < TXG_SIZE; i++) {
		if (!txg_list_empty(tl, i)) {
			return (B_FALSE);
		}
	}
	return (B_TRUE);
}

/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	txg_verify(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	txg_verify(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	txg_verify(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	txg_verify(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	txg_verify(tl->tl_spa, txg);
	return (tn->tn_member[t] != 0);
}

/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	txg_verify(tl->tl_spa, txg);
	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	txg_verify(tl->tl_spa, txg);
	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);

module_param(zfs_txg_timeout, int, 0644);
MODULE_PARM_DESC(zfs_txg_timeout, "Max seconds worth of delta per txg");
#endif