/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>
/*
 * ZFS Transaction Groups
 * ----------------------
 *
 * ZFS transaction groups are, as the name implies, groups of transactions
 * that act on persistent state. ZFS asserts consistency at the granularity of
 * these transaction groups. Each successive transaction group (txg) is
 * assigned a 64-bit consecutive identifier. There are three active
 * transaction group states: open, quiescing, or syncing. At any given time,
 * there may be an active txg associated with each state; each active txg may
 * either be processing, or blocked waiting to enter the next state. There may
 * be up to three active txgs, and there is always a txg in the open state
 * (though it may be blocked waiting to enter the quiescing state). In broad
 * strokes, transactions -- operations that change in-memory structures -- are
 * accepted into the txg in the open state, and are completed while the txg is
 * in the open or quiescing states. The accumulated changes are written to
 * disk in the syncing state.
 *
 * Open
 *
 * When a new txg becomes active, it first enters the open state. New
 * transactions -- updates to in-memory structures -- are assigned to the
 * currently open txg. There is always a txg in the open state so that ZFS can
 * accept new changes (though the txg may refuse new changes if it has hit
 * some limit). ZFS advances the open txg to the next state for a variety of
 * reasons such as it hitting a time or size threshold, or the execution of an
 * administrative action that must be completed in the syncing state.
 *
 * Quiescing
 *
 * After a txg exits the open state, it enters the quiescing state. The
 * quiescing state is intended to provide a buffer between accepting new
 * transactions in the open state and writing them out to stable storage in
 * the syncing state. While quiescing, transactions can continue their
 * operation without delaying either of the other states. Typically, a txg is
 * in the quiescing state very briefly since the operations are bounded by
 * software latencies rather than, say, slower I/O latencies. After all
 * transactions complete, the txg is ready to enter the next state.
 *
 * Syncing
 *
 * In the syncing state, the in-memory state built up during the open and (to
 * a lesser degree) the quiescing states is written to stable storage. The
 * process of writing out modified data can, in turn modify more data. For
 * example when we write new blocks, we need to allocate space for them; those
 * allocations modify metadata (space maps)... which themselves must be
 * written to stable storage. During the sync state, ZFS iterates, writing out
 * data until it converges and all in-memory changes have been written out.
 * The first such pass is the largest as it encompasses all the modified user
 * data (as opposed to filesystem metadata). Subsequent passes typically have
 * far less data to write as they consist exclusively of filesystem metadata.
 *
 * To ensure convergence, after a certain number of passes ZFS begins
 * overwriting locations on stable storage that had been allocated earlier in
 * the syncing state (and subsequently freed). ZFS usually allocates new
 * blocks to optimize for large, continuous, writes. For the syncing state to
 * converge however it must complete a pass where no new blocks are allocated
 * since each allocation requires a modification of persistent metadata.
 * Further, to hasten convergence, after a prescribed number of passes, ZFS
 * also defers frees, and stops compressing.
 *
 * In addition to writing out user data, we must also execute synctasks during
 * the syncing context. A synctask is the mechanism by which some
 * administrative activities work such as creating and destroying snapshots or
 * datasets. Note that when a synctask is initiated it enters the open txg,
 * and ZFS then pushes that txg as quickly as possible to completion of the
 * syncing state in order to reduce the latency of the administrative
 * activity. To complete the syncing state, ZFS writes out a new uberblock,
 * the root of the tree of blocks that comprise all state stored on the ZFS
 * pool. Finally, if there is a quiesced txg waiting, we signal that it can
 * now transition to the syncing state.
 */
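/*
 * Illustrative sketch (an editorial addition, not part of the original
 * file; the function name and the ZFS_TXG_EXAMPLE guard are hypothetical):
 * consumers never drive these states directly.  A typical DMU write joins
 * the open txg via dmu_tx_assign(), and the threads below then carry that
 * group through quiescing and syncing.
 */
#ifdef ZFS_TXG_EXAMPLE
static int
txg_example_write(objset_t *os, uint64_t object, uint64_t off, int len,
    void *buf)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int error;

	/* Declare the write so the open txg can account for the dirty data. */
	dmu_tx_hold_write(tx, object, off, len);

	/* Join the currently open txg, waiting if it cannot accept more. */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	dmu_write(os, object, off, len, buf, tx);

	/* The change now rides this txg through quiesce and sync. */
	dmu_tx_commit(tx);

	return (0);
}
#endif /* ZFS_TXG_EXAMPLE */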
static void txg_sync_thread(dsl_pool_t *dp);
static void txg_quiesce_thread(dsl_pool_t *dp);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */
/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_DEFAULT,
		    NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}
/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_open_lock);
		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}
/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 32<<10, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}
static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}
static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}
static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait_interruptible(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	else
		cv_wait_interruptible(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);

	/*
	 * We need to ensure that we've vacated the deferred space_maps.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc;
	uint64_t txg;

	/*
	 * It appears the processor id is simply used as a "random"
	 * number to index into the array, and there isn't any other
	 * significance to the chosen tx_cpu. Because.. Why not use
	 * the current cpu to index into the array?
	 */
	tc = &tx->tx_cpu[CPU_SEQID];

	mutex_enter(&tc->tc_open_lock);
	txg = tx->tx_open_txg;

	mutex_enter(&tc->tc_lock);
	tc->tc_count[txg & TXG_MASK]++;
	mutex_exit(&tc->tc_lock);

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}
void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	ASSERT(!MUTEX_HELD(&tc->tc_lock));
	mutex_exit(&tc->tc_open_lock);
}
void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}
void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}
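/*
 * Illustrative sketch (editorial addition; the function name and the
 * ZFS_TXG_EXAMPLE guard are hypothetical): the hold/release pairing that
 * dmu_tx_assign() and dmu_tx_commit() perform under the hood.  A hold
 * taken here keeps txg_quiesce() waiting until the matching release.
 */
#ifdef ZFS_TXG_EXAMPLE
static void
txg_example_hold(dsl_pool_t *dp)
{
	txg_handle_t th;
	uint64_t txg;

	/* Enter the open txg; bumps this cpu's tc_count for that txg. */
	txg = txg_hold_open(dp, &th);
	dprintf("holding txg %llu\n", txg);

	/* ... make in-memory changes that must land in this txg ... */

	/* Drop the hold; once all holds drop, the txg can quiesce. */
	txg_rele_to_sync(&th);
}
#endif /* ZFS_TXG_EXAMPLE */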
/*
 * Blocks until all transactions in the group are committed.
 *
 * On return, the transaction group has reached a stable state in which it can
 * then be passed off to the syncing context.
 */
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tc_open_locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_open_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;
	tx->tx_open_time = gethrtime();

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, gethrtime());
	spa_txg_history_add(dp->dp_spa, tx->tx_open_txg);

	DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
	DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_open_lock);

	/*
	 * Quiesce the transaction group by waiting for everyone to txg_exit().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
}
static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}
/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 *
 * If no callbacks are registered for a given TXG, nothing happens.
 * This function creates a taskq for the associated pool, if needed.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/*
		 * No need to lock tx_cpu_t at this point, since this can
		 * only be called once a txg has been synced.
		 */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    100, minclsyspri, max_ncpus, INT_MAX,
			    TASKQ_THREADS_CPU_PCT | TASKQ_PREPOPULATE);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_PUSHPAGE);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}
/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait(tx->tx_commit_cb_taskq);
}
static void
txg_sync_thread(dsl_pool_t *dp)
{
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	vdev_stat_t *vs1, *vs2;
	uint64_t start, delta;

#ifdef _KERNEL
	/*
	 * Annotate this process with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	current->flags |= PF_NOFS;
#endif /* _KERNEL */

	txg_thread_enter(tx, &cpr);

	vs1 = kmem_alloc(sizeof (vdev_stat_t), KM_PUSHPAGE);
	vs2 = kmem_alloc(sizeof (vdev_stat_t), KM_PUSHPAGE);

	start = delta = 0;
	for (;;) {
		uint64_t timer, timeout;
		uint64_t txg;

		timeout = zfs_txg_timeout * hz;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0 &&
		    dp->dp_dirty_total < zfs_dirty_data_sync) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting) {
			kmem_free(vs2, sizeof (vdev_stat_t));
			kmem_free(vs1, sizeof (vdev_stat_t));
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);
		}

		vdev_get_stats(spa->spa_root_vdev, vs1);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);

		vdev_get_stats(spa->spa_root_vdev, vs2);
		spa_txg_history_set_io(spa, txg,
		    vs2->vs_bytes[ZIO_TYPE_READ]-vs1->vs_bytes[ZIO_TYPE_READ],
		    vs2->vs_bytes[ZIO_TYPE_WRITE]-vs1->vs_bytes[ZIO_TYPE_WRITE],
		    vs2->vs_ops[ZIO_TYPE_READ]-vs1->vs_ops[ZIO_TYPE_READ],
		    vs2->vs_ops[ZIO_TYPE_WRITE]-vs1->vs_ops[ZIO_TYPE_WRITE],
		    dp->dp_dirty_pertxg[txg & TXG_MASK]);
		spa_txg_history_set(spa, txg, TXG_STATE_SYNCED, gethrtime());
	}
}
static void
txg_quiesce_thread(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}
/*
 * Delay this thread by delay nanoseconds if we are still in the open
 * transaction group and there is already a waiting txg quiescing or quiesced.
 * Abort the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
{
	tx_state_t *tx = &dp->dp_tx;
	hrtime_t start = gethrtime();

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (gethrtime() - start < delay &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) {
		(void) cv_timedwait_hires(&tx->tx_quiesce_more_cv,
		    &tx->tx_sync_lock, delay, resolution, 0);
	}

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}
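/*
 * Illustrative sketch (editorial addition; the function name and the
 * ZFS_TXG_EXAMPLE guard are hypothetical): the common durability idiom
 * used by fsync-like paths -- note the txg a tx was assigned to, commit,
 * then block until that txg has synced.  Passing txg == 0 above instead
 * waits for the currently open txg (plus TXG_DEFER_SIZE) to sync.
 */
#ifdef ZFS_TXG_EXAMPLE
static void
txg_example_durable_commit(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t txg = dmu_tx_get_txg(tx);	/* valid once assigned */

	dmu_tx_commit(tx);

	/* Returns only after this txg's uberblock is on stable storage. */
	txg_wait_synced(dp, txg);
}
#endif /* ZFS_TXG_EXAMPLE */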
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}
/*
 * If there isn't a txg syncing or in the pipeline, push another txg through
 * the pipeline by quiescing the open txg.
 */
void
txg_kick(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_syncing_txg == 0 &&
	    tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
	    tx->tx_sync_txg_waiting <= tx->tx_synced_txg &&
	    tx->tx_quiesced_txg <= tx->tx_synced_txg) {
		tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
		cv_broadcast(&tx->tx_quiesce_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);
}
boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}
boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}
/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}
void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}
boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}
/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}
/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}
/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}
/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}
boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t] != 0);
}
/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
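/*
 * Illustrative sketch (editorial addition; the struct, function name, and
 * ZFS_TXG_EXAMPLE guard are hypothetical): objects join a per-txg list by
 * embedding a txg_node_t and handing its offset to txg_list_create().
 */
#ifdef ZFS_TXG_EXAMPLE
typedef struct txg_example_obj {
	uint64_t eo_id;
	txg_node_t eo_node;	/* linkage for one txg_list_t */
} txg_example_obj_t;

static void
txg_example_lists(txg_example_obj_t *obj, uint64_t txg)
{
	txg_list_t tl;

	txg_list_create(&tl, offsetof(txg_example_obj_t, eo_node));

	/* Queue the object for this txg; a no-op if already queued. */
	(void) txg_list_add(&tl, obj, txg);

	/* The sync thread would later drain the bucket for txg & TXG_MASK. */
	while (txg_list_remove(&tl, txg) != NULL)
		continue;

	txg_list_destroy(&tl);	/* asserts every bucket is empty */
}
#endif /* ZFS_TXG_EXAMPLE */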
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);

module_param(zfs_txg_timeout, int, 0644);
MODULE_PARM_DESC(zfs_txg_timeout, "Max seconds worth of delta per txg");
#endif