/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>

/*
 * ZFS Transaction Groups
 * ----------------------
 *
 * ZFS transaction groups are, as the name implies, groups of transactions
 * that act on persistent state. ZFS asserts consistency at the granularity of
 * these transaction groups. Each successive transaction group (txg) is
 * assigned a 64-bit consecutive identifier. There are three active
 * transaction group states: open, quiescing, or syncing. At any given time,
 * there may be an active txg associated with each state; each active txg may
 * either be processing, or blocked waiting to enter the next state. There may
 * be up to three active txgs, and there is always a txg in the open state
 * (though it may be blocked waiting to enter the quiescing state). In broad
 * strokes, transactions -- operations that change in-memory structures -- are
 * accepted into the txg in the open state, and are completed while the txg is
 * in the open or quiescing states. The accumulated changes are written to
 * disk in the syncing state.
 *
 * Open
 *
 * When a new txg becomes active, it first enters the open state. New
 * transactions -- updates to in-memory structures -- are assigned to the
 * currently open txg. There is always a txg in the open state so that ZFS can
 * accept new changes (though the txg may refuse new changes if it has hit
 * some limit). ZFS advances the open txg to the next state for a variety of
 * reasons such as it hitting a time or size threshold, or the execution of an
 * administrative action that must be completed in the syncing state.
 *
 * Quiescing
 *
 * After a txg exits the open state, it enters the quiescing state. The
 * quiescing state is intended to provide a buffer between accepting new
 * transactions in the open state and writing them out to stable storage in
 * the syncing state. While quiescing, transactions can continue their
 * operation without delaying either of the other states. Typically, a txg is
 * in the quiescing state very briefly since the operations are bounded by
 * software latencies rather than, say, slower I/O latencies. After all
 * transactions complete, the txg is ready to enter the next state.
 *
 * Syncing
 *
 * In the syncing state, the in-memory state built up during the open and (to
 * a lesser degree) the quiescing states is written to stable storage. The
 * process of writing out modified data can, in turn, modify more data. For
 * example, when we write new blocks, we need to allocate space for them; those
 * allocations modify metadata (space maps)... which themselves must be
 * written to stable storage. During the sync state, ZFS iterates, writing out
 * data until it converges and all in-memory changes have been written out.
 * The first such pass is the largest as it encompasses all the modified user
 * data (as opposed to filesystem metadata). Subsequent passes typically have
 * far less data to write as they consist exclusively of filesystem metadata.
 *
 * To ensure convergence, after a certain number of passes ZFS begins
 * overwriting locations on stable storage that had been allocated earlier in
 * the syncing state (and subsequently freed). ZFS usually allocates new
 * blocks to optimize for large, continuous writes. For the syncing state to
 * converge, however, it must complete a pass where no new blocks are allocated
 * since each allocation requires a modification of persistent metadata.
 * Further, to hasten convergence, after a prescribed number of passes, ZFS
 * also defers frees, and stops compressing.
 *
 * In addition to writing out user data, we must also execute synctasks during
 * the syncing context. A synctask is the mechanism by which some
 * administrative activities work such as creating and destroying snapshots or
 * datasets. Note that when a synctask is initiated it enters the open txg,
 * and ZFS then pushes that txg as quickly as possible to completion of the
 * syncing state in order to reduce the latency of the administrative
 * activity. To complete the syncing state, ZFS writes out a new uberblock,
 * the root of the tree of blocks that comprise all state stored on the ZFS
 * pool. Finally, if there is a quiesced txg waiting, we signal that it can
 * now transition to the syncing state.
 */
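
/*
 * For illustration, a DMU consumer enters the currently open txg roughly
 * as sketched below (a simplified example, not code from this file):
 * dmu_tx_assign() ultimately takes the txg hold via txg_hold_open(), and
 * dmu_tx_commit() releases it via txg_rele_to_sync():
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	if ((error = dmu_tx_assign(tx, TXG_WAIT)) != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	... apply the in-memory changes under this open txg ...
 *	dmu_tx_commit(tx);
 */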

static void txg_sync_thread(dsl_pool_t *dp);
static void txg_quiesce_thread(dsl_pool_t *dp);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 32<<10, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait_interruptible(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	else
		cv_wait_interruptible(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);

	/*
	 * We need to ensure that we've vacated the deferred space_maps.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}
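
/*
 * Take a hold on the currently open txg and return its number.  The hold
 * keeps the txg from quiescing until released with txg_rele_to_sync();
 * the per-cpu tc_lock is left held on return and is dropped by
 * txg_rele_to_quiesce().
 */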
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc;
	uint64_t txg;

	/*
	 * The processor id is simply used as a "random" number to index
	 * into the tx_cpu array; there is no other significance to the
	 * chosen tx_cpu, and the current cpu is as good a choice as any.
	 */
	tc = &tx->tx_cpu[CPU_SEQID];

	mutex_enter(&tc->tc_lock);

	txg = tx->tx_open_txg;
	tc->tc_count[txg & TXG_MASK]++;

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}
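
/*
 * Drop the tc_lock taken in txg_hold_open().  The hold itself remains in
 * effect until txg_rele_to_sync() is called.
 */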
void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	mutex_exit(&tc->tc_lock);
}
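
/*
 * Attach the given commit callbacks to the handle's txg; they will be
 * dispatched after that txg has synced.
 */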
void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}
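
/*
 * Release the hold taken in txg_hold_open(), waking txg_quiesce() if this
 * was the last hold on the txg.
 */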
void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}
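
/*
 * Block until all transactions assigned to the given txg have committed.
 * On return, the txg has reached a stable state and can be handed off to
 * the syncing context.
 */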
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tx_cpu locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, gethrtime());
	spa_txg_history_add(dp->dp_spa, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_lock);

	/*
	 * Quiesce the transaction group by waiting for everyone to txg_exit().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
}
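
/*
 * Taskq entry point: run a list of commit callbacks, then dispose of the
 * list itself.
 */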
static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/* No need to lock tx_cpu_t at this point */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    100, minclsyspri, max_ncpus, INT_MAX,
			    TASKQ_THREADS_CPU_PCT | TASKQ_PREPOPULATE);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_PUSHPAGE);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}

/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait(tx->tx_commit_cb_taskq);
}
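
/*
 * Sync thread main loop: wait until the quiesce thread hands off a txg
 * (or a timeout or explicit request forces one), write it out with
 * spa_sync(), then record per-txg timing and I/O statistics.
 */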
static void
txg_sync_thread(dsl_pool_t *dp)
{
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	vdev_stat_t *vs1, *vs2;
	uint64_t start, delta;

#ifdef _KERNEL
	/*
	 * Annotate this process with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	current->flags |= PF_NOFS;
#endif /* _KERNEL */

	txg_thread_enter(tx, &cpr);

	vs1 = kmem_alloc(sizeof (vdev_stat_t), KM_PUSHPAGE);
	vs2 = kmem_alloc(sizeof (vdev_stat_t), KM_PUSHPAGE);

	start = delta = 0;
	for (;;) {
		uint64_t timer, timeout;
		uint64_t txg;

		timeout = zfs_txg_timeout * hz;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting) {
			kmem_free(vs2, sizeof (vdev_stat_t));
			kmem_free(vs1, sizeof (vdev_stat_t));
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);
		}

		vdev_get_stats(spa->spa_root_vdev, vs1);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);

		mutex_exit(&tx->tx_sync_lock);

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);

		vdev_get_stats(spa->spa_root_vdev, vs2);
		spa_txg_history_set_io(spa, txg,
		    vs2->vs_bytes[ZIO_TYPE_READ]-vs1->vs_bytes[ZIO_TYPE_READ],
		    vs2->vs_bytes[ZIO_TYPE_WRITE]-vs1->vs_bytes[ZIO_TYPE_WRITE],
		    vs2->vs_ops[ZIO_TYPE_READ]-vs1->vs_ops[ZIO_TYPE_READ],
		    vs2->vs_ops[ZIO_TYPE_WRITE]-vs1->vs_ops[ZIO_TYPE_WRITE],
		    dp->dp_space_towrite[txg & TXG_MASK] +
		    dp->dp_tempreserved[txg & TXG_MASK] / 2);
		spa_txg_history_set(spa, txg, TXG_STATE_SYNCED, gethrtime());
	}
}
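
/*
 * Quiesce thread main loop: once a quiesce has been requested and the
 * previously quiesced txg has been consumed by the sync thread, quiesce
 * the open txg and hand it off to the sync thread.
 */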
static void
txg_quiesce_thread(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

/*
 * Delay this thread by 'ticks' if we are still in the open transaction
 * group and there is already a waiting txg quiescing or quiesced.  Abort
 * the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
	tx_state_t *tx = &dp->dp_tx;
	clock_t timeout = ddi_get_lbolt() + ticks;

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (ddi_get_lbolt() < timeout &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
		(void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
		    timeout);

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}
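
/*
 * Block until the given txg has been synced to disk.  A txg of 0 waits
 * for the currently open txg plus TXG_DEFER_SIZE, so that deferred frees
 * are written out as well.
 */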
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}
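
/*
 * Block until the given txg is open for new transactions.  A txg of 0
 * means the txg following the currently open one.
 */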
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}
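
/*
 * Report whether the open txg is stalled: a quiesce has been requested
 * but the open txg has not yet advanced past it.
 */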
boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}
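
/*
 * Report whether the sync thread still has work pending: a txg being (or
 * waiting to be) synced, or a quiesced txg awaiting hand-off.
 */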
boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}

boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}
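
/*
 * Report whether the object is on the list for the given txg.
 */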
boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t] != 0);
}

/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);

module_param(zfs_txg_timeout, int, 0644);
MODULE_PARM_DESC(zfs_txg_timeout, "Max seconds worth of delta per txg");
#endif