/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>
/*
 * Pool-wide transaction groups.
 */

static void txg_sync_thread(dsl_pool_t *dp);
static void txg_quiesce_thread(dsl_pool_t *dp);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */
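
/*
 * Overview (editorial summary, inferred from the code below): at most
 * three txgs are ever in flight -- one open to new assignments, one
 * quiescing (draining the holds of threads still assigning into it), and
 * one syncing to disk.  txg_quiesce_thread() advances txgs from open to
 * quiesced and hands them off to txg_sync_thread(), which writes them out
 * via spa_sync() and marks them synced.
 */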
/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
        tx_state_t *tx = &dp->dp_tx;
        int c;

        bzero(tx, sizeof (tx_state_t));

        tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

        for (c = 0; c < max_ncpus; c++) {
                int i;

                mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
                for (i = 0; i < TXG_SIZE; i++) {
                        cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
                            NULL);
                        list_create(&tx->tx_cpu[c].tc_callbacks[i],
                            sizeof (dmu_tx_callback_t),
                            offsetof(dmu_tx_callback_t, dcb_node));
                }
        }

        mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

        cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

        tx->tx_open_txg = txg;
}
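
/*
 * Editorial note: the per-txg state initialized above (tc_cv[],
 * tc_callbacks[], and the tc_count[] used later) lives in rings of
 * TXG_SIZE entries indexed by (txg & TXG_MASK).  TXG_SIZE is a power of
 * two, so the mask just keeps the low bits of the txg number; e.g. with
 * TXG_SIZE == 4, txg 1000 maps to slot 1000 & 3 == 0 and cannot collide
 * with any of the (at most three) other txgs in flight.
 */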
/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;
        int c;

        ASSERT(tx->tx_threads == 0);

        mutex_destroy(&tx->tx_sync_lock);

        cv_destroy(&tx->tx_sync_more_cv);
        cv_destroy(&tx->tx_sync_done_cv);
        cv_destroy(&tx->tx_quiesce_more_cv);
        cv_destroy(&tx->tx_quiesce_done_cv);
        cv_destroy(&tx->tx_exit_cv);

        for (c = 0; c < max_ncpus; c++) {
                int i;

                mutex_destroy(&tx->tx_cpu[c].tc_lock);
                for (i = 0; i < TXG_SIZE; i++) {
                        cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
                        list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
                }
        }

        if (tx->tx_commit_cb_taskq != NULL)
                taskq_destroy(tx->tx_commit_cb_taskq);

        vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

        bzero(tx, sizeof (tx_state_t));
}
/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;

        mutex_enter(&tx->tx_sync_lock);

        dprintf("pool %p\n", dp);

        ASSERT(tx->tx_threads == 0);

        tx->tx_threads = 2;

        tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
            dp, 0, &p0, TS_RUN, minclsyspri);

        /*
         * The sync thread can need a larger-than-default stack size on
         * 32-bit x86.  This is due in part to nested pools and
         * scrub_visitbp() recursion.
         */
        tx->tx_sync_thread = thread_create(NULL, 32<<10, txg_sync_thread,
            dp, 0, &p0, TS_RUN, minclsyspri);

        mutex_exit(&tx->tx_sync_lock);
}
static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
        CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
        mutex_enter(&tx->tx_sync_lock);
}
static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
        ASSERT(*tpp != NULL);
        *tpp = NULL;
        tx->tx_threads--;
        cv_broadcast(&tx->tx_exit_cv);
        CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
        thread_exit();
}
static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
        CALLB_CPR_SAFE_BEGIN(cpr);

        if (time)
                (void) cv_timedwait_interruptible(cv, &tx->tx_sync_lock,
                    ddi_get_lbolt() + time);
        else
                cv_wait_interruptible(cv, &tx->tx_sync_lock);

        CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;

        dprintf("pool %p\n", dp);
        /*
         * Finish off any work in progress.
         */
        ASSERT(tx->tx_threads == 2);

        /*
         * We need to ensure that we've vacated the deferred space_maps.
         */
        txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

        /*
         * Wake all sync threads and wait for them to die.
         */
        mutex_enter(&tx->tx_sync_lock);

        ASSERT(tx->tx_threads == 2);

        tx->tx_exiting = 1;

        cv_broadcast(&tx->tx_quiesce_more_cv);
        cv_broadcast(&tx->tx_quiesce_done_cv);
        cv_broadcast(&tx->tx_sync_more_cv);

        while (tx->tx_threads != 0)
                cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

        tx->tx_exiting = 0;

        mutex_exit(&tx->tx_sync_lock);
}
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
        tx_state_t *tx = &dp->dp_tx;
        tx_cpu_t *tc;
        uint64_t txg;

        /*
         * The processor id is simply used as a "random" number to index
         * into the tx_cpu array; there is no other significance to the
         * chosen tx_cpu, so the current cpu is as good a choice as any.
         */
        tc = &tx->tx_cpu[CPU_SEQID];

        mutex_enter(&tc->tc_lock);

        txg = tx->tx_open_txg;
        tc->tc_count[txg & TXG_MASK]++;

        th->th_cpu = tc;
        th->th_txg = txg;

        return (txg);
}
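
/*
 * Usage sketch (editorial; the real caller is the dmu_tx layer): a hold
 * is paired with the two release functions below.  txg_hold_open()
 * returns with tc_lock still held, which keeps txg_quiesce() from bumping
 * tx_open_txg until txg_rele_to_quiesce() drops the lock:
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);
 *	txg_rele_to_quiesce(&th);	(drops tc_lock; the hold stays counted)
 *	... dirty data in 'txg' ...
 *	txg_rele_to_sync(&th);		(drops the hold; may wake txg_quiesce())
 */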
void
txg_rele_to_quiesce(txg_handle_t *th)
{
        tx_cpu_t *tc = th->th_cpu;

        mutex_exit(&tc->tc_lock);
}
void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
        tx_cpu_t *tc = th->th_cpu;
        int g = th->th_txg & TXG_MASK;

        mutex_enter(&tc->tc_lock);
        list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
        mutex_exit(&tc->tc_lock);
}
void
txg_rele_to_sync(txg_handle_t *th)
{
        tx_cpu_t *tc = th->th_cpu;
        int g = th->th_txg & TXG_MASK;

        mutex_enter(&tc->tc_lock);
        ASSERT(tc->tc_count[g] != 0);
        if (--tc->tc_count[g] == 0)
                cv_broadcast(&tc->tc_cv[g]);
        mutex_exit(&tc->tc_lock);

        th->th_cpu = NULL;	/* defensive */
}
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
        tx_state_t *tx = &dp->dp_tx;
        int g = txg & TXG_MASK;
        int c;

        /*
         * Grab all tx_cpu locks so nobody else can get into this txg.
         */
        for (c = 0; c < max_ncpus; c++)
                mutex_enter(&tx->tx_cpu[c].tc_lock);

        ASSERT(txg == tx->tx_open_txg);
        tx->tx_open_txg++;

        /*
         * Now that we've incremented tx_open_txg, we can let threads
         * enter the next transaction group.
         */
        for (c = 0; c < max_ncpus; c++)
                mutex_exit(&tx->tx_cpu[c].tc_lock);

        /*
         * Quiesce the transaction group by waiting for everyone to txg_exit().
         */
        for (c = 0; c < max_ncpus; c++) {
                tx_cpu_t *tc = &tx->tx_cpu[c];
                mutex_enter(&tc->tc_lock);
                while (tc->tc_count[g] != 0)
                        cv_wait(&tc->tc_cv[g], &tc->tc_lock);
                mutex_exit(&tc->tc_lock);
        }
}
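
/*
 * Editorial note: grabbing every tc_lock above is what makes the open ->
 * quiescing transition safe.  Any thread in txg_hold_open() either took
 * its hold before tx_open_txg was bumped (and is therefore counted in
 * tc_count[g], which the final loop waits out) or will observe the new
 * open txg once the locks are dropped.
 */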
static void
txg_do_callbacks(list_t *cb_list)
{
        dmu_tx_do_callbacks(cb_list, 0);

        list_destroy(cb_list);

        kmem_free(cb_list, sizeof (list_t));
}
/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
        int c;
        tx_state_t *tx = &dp->dp_tx;
        list_t *cb_list;

        for (c = 0; c < max_ncpus; c++) {
                tx_cpu_t *tc = &tx->tx_cpu[c];
                /* No need to lock tx_cpu_t at this point */

                int g = txg & TXG_MASK;

                if (list_is_empty(&tc->tc_callbacks[g]))
                        continue;

                if (tx->tx_commit_cb_taskq == NULL) {
                        /*
                         * Commit callback taskq hasn't been created yet.
                         */
                        tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
                            100, minclsyspri, max_ncpus, INT_MAX,
                            TASKQ_THREADS_CPU_PCT | TASKQ_PREPOPULATE);
                }

                cb_list = kmem_alloc(sizeof (list_t), KM_PUSHPAGE);
                list_create(cb_list, sizeof (dmu_tx_callback_t),
                    offsetof(dmu_tx_callback_t, dcb_node));

                list_move_tail(cb_list, &tc->tc_callbacks[g]);

                (void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
                    txg_do_callbacks, cb_list, TQ_SLEEP);
        }
}
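
/*
 * Editorial sketch: entries reach tc_callbacks[] through
 * txg_register_callbacks(), typically on behalf of
 * dmu_tx_callback_register() in dmu_tx.c.  Draining them onto a private
 * list and dispatching to the "tx_commit_cb" taskq means user callbacks
 * run asynchronously and can never stall the sync thread itself.
 */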
/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;

        if (tx->tx_commit_cb_taskq != NULL)
                taskq_wait(tx->tx_commit_cb_taskq);
}
static void
txg_sync_thread(dsl_pool_t *dp)
{
        spa_t *spa = dp->dp_spa;
        tx_state_t *tx = &dp->dp_tx;
        callb_cpr_t cpr;
        uint64_t start, delta;

#ifdef _KERNEL
        /*
         * Annotate this process with a flag that indicates that it is
         * unsafe to use KM_SLEEP during memory allocations due to the
         * potential for a deadlock.  KM_PUSHPAGE should be used instead.
         */
        current->flags |= PF_NOFS;
#endif /* _KERNEL */

        txg_thread_enter(tx, &cpr);

        start = delta = 0;
        for (;;) {
                uint64_t timer, timeout;
                uint64_t txg;

                timeout = zfs_txg_timeout * hz;

                /*
                 * We sync when we're scanning, there's someone waiting
                 * on us, or the quiesce thread has handed off a txg to
                 * us, or we have reached our timeout.
                 */
                timer = (delta >= timeout ? 0 : timeout - delta);
                while (!dsl_scan_active(dp->dp_scan) &&
                    !tx->tx_exiting && timer > 0 &&
                    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
                    tx->tx_quiesced_txg == 0) {
                        dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
                            tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
                        txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
                        delta = ddi_get_lbolt() - start;
                        timer = (delta > timeout ? 0 : timeout - delta);
                }

                /*
                 * Wait until the quiesce thread hands off a txg to us,
                 * prompting it to do so if necessary.
                 */
                while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
                        if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
                                tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
                        cv_broadcast(&tx->tx_quiesce_more_cv);
                        txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
                }

                if (tx->tx_exiting)
                        txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

                /*
                 * Consume the quiesced txg which has been handed off to
                 * us.  This may cause the quiescing thread to now be
                 * able to quiesce another txg, so we must signal it.
                 */
                txg = tx->tx_quiesced_txg;
                tx->tx_quiesced_txg = 0;
                tx->tx_syncing_txg = txg;
                cv_broadcast(&tx->tx_quiesce_more_cv);

                dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
                    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
                mutex_exit(&tx->tx_sync_lock);

                start = ddi_get_lbolt();
                spa_sync(spa, txg);
                delta = ddi_get_lbolt() - start;

                mutex_enter(&tx->tx_sync_lock);
                tx->tx_synced_txg = txg;
                tx->tx_syncing_txg = 0;
                cv_broadcast(&tx->tx_sync_done_cv);

                /*
                 * Dispatch commit callbacks to worker threads.
                 */
                txg_dispatch_callbacks(dp, txg);
        }
}
static void
txg_quiesce_thread(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;
        callb_cpr_t cpr;

        txg_thread_enter(tx, &cpr);

        for (;;) {
                uint64_t txg;

                /*
                 * We quiesce when there's someone waiting on us.
                 * However, we can only have one txg in "quiescing" or
                 * "quiesced, waiting to sync" state.  So we wait until
                 * the "quiesced, waiting to sync" txg has been consumed
                 * by the sync thread.
                 */
                while (!tx->tx_exiting &&
                    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
                    tx->tx_quiesced_txg != 0))
                        txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

                if (tx->tx_exiting)
                        txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

                txg = tx->tx_open_txg;
                dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
                    txg, tx->tx_quiesce_txg_waiting,
                    tx->tx_sync_txg_waiting);
                mutex_exit(&tx->tx_sync_lock);
                txg_quiesce(dp, txg);
                mutex_enter(&tx->tx_sync_lock);

                /*
                 * Hand this txg off to the sync thread.
                 */
                dprintf("quiesce done, handing off txg %llu\n", txg);
                tx->tx_quiesced_txg = txg;
                cv_broadcast(&tx->tx_sync_more_cv);
                cv_broadcast(&tx->tx_quiesce_done_cv);
        }
}
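
/*
 * Editorial note on the handoff above: tx_quiesced_txg is a single-slot
 * mailbox between the two threads.  The quiesce thread fills it and
 * broadcasts tx_sync_more_cv and tx_quiesce_done_cv; the sync thread
 * empties it (moving the txg to tx_syncing_txg) and broadcasts
 * tx_quiesce_more_cv, so the next txg can quiesce while this one syncs.
 */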
/*
 * Delay this thread by 'ticks' if we are still in the open transaction
 * group and there is already a waiting txg quiescing or quiesced.  Abort
 * the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
        tx_state_t *tx = &dp->dp_tx;
        clock_t timeout = ddi_get_lbolt() + ticks;

        /* don't delay if this txg could transition to quiescing immediately */
        if (tx->tx_open_txg > txg ||
            tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
                return;

        mutex_enter(&tx->tx_sync_lock);
        if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
                mutex_exit(&tx->tx_sync_lock);
                return;
        }

        while (ddi_get_lbolt() < timeout &&
            tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
                (void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
                    timeout);

        DMU_TX_STAT_BUMP(dmu_tx_delay);

        mutex_exit(&tx->tx_sync_lock);
}
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
        tx_state_t *tx = &dp->dp_tx;

        mutex_enter(&tx->tx_sync_lock);
        ASSERT(tx->tx_threads == 2);
        if (txg == 0)
                txg = tx->tx_open_txg + TXG_DEFER_SIZE;
        if (tx->tx_sync_txg_waiting < txg)
                tx->tx_sync_txg_waiting = txg;
        dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
            txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
        while (tx->tx_synced_txg < txg) {
                dprintf("broadcasting sync more "
                    "tx_synced=%llu waiting=%llu dp=%p\n",
                    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
                cv_broadcast(&tx->tx_sync_more_cv);
                cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
        }
        mutex_exit(&tx->tx_sync_lock);
}
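
/*
 * Usage sketch (editorial): passing txg == 0 waits for everything that is
 * dirty at the time of the call:
 *
 *	txg_wait_synced(dp, 0);		(waits TXG_DEFER_SIZE txgs past
 *					the currently open txg)
 */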
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
        tx_state_t *tx = &dp->dp_tx;

        mutex_enter(&tx->tx_sync_lock);
        ASSERT(tx->tx_threads == 2);
        if (txg == 0)
                txg = tx->tx_open_txg + 1;
        if (tx->tx_quiesce_txg_waiting < txg)
                tx->tx_quiesce_txg_waiting = txg;
        dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
            txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
        while (tx->tx_open_txg < txg) {
                cv_broadcast(&tx->tx_quiesce_more_cv);
                cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
        }
        mutex_exit(&tx->tx_sync_lock);
}
boolean_t
txg_stalled(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;
        return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}
boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;

        return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
            tx->tx_quiesced_txg != 0);
}
/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, size_t offset)
{
        int t;

        mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

        tl->tl_offset = offset;

        for (t = 0; t < TXG_SIZE; t++)
                tl->tl_head[t] = NULL;
}
void
txg_list_destroy(txg_list_t *tl)
{
        int t;

        for (t = 0; t < TXG_SIZE; t++)
                ASSERT(txg_list_empty(tl, t));

        mutex_destroy(&tl->tl_lock);
}
int
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
        return (tl->tl_head[txg & TXG_MASK] == NULL);
}
/*
 * Add an entry to the list.
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
        int already_on_list;

        mutex_enter(&tl->tl_lock);
        already_on_list = tn->tn_member[t];
        if (!already_on_list) {
                tn->tn_member[t] = 1;
                tn->tn_next[t] = tl->tl_head[t];
                tl->tl_head[t] = tn;
        }
        mutex_exit(&tl->tl_lock);

        return (already_on_list);
}
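
/*
 * Editorial sketch: txg lists are intrusive, so insertion never
 * allocates.  A hypothetical client embeds a txg_node_t and registers
 * its offset when creating the list:
 *
 *	typedef struct my_obj {
 *		...
 *		txg_node_t	mo_node;	(per-txg linkage)
 *	} my_obj_t;
 *
 *	txg_list_create(&list, offsetof(my_obj_t, mo_node));
 *	(void) txg_list_add(&list, obj, txg);
 */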
/*
 * Add an entry to the end of the list (walks list to find end).
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
        int already_on_list;

        mutex_enter(&tl->tl_lock);
        already_on_list = tn->tn_member[t];
        if (!already_on_list) {
                txg_node_t **tp;

                for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
                        continue;

                tn->tn_member[t] = 1;
                tn->tn_next[t] = NULL;
                *tp = tn;
        }
        mutex_exit(&tl->tl_lock);

        return (already_on_list);
}
/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn;
        void *p = NULL;

        mutex_enter(&tl->tl_lock);
        if ((tn = tl->tl_head[t]) != NULL) {
                p = (char *)tn - tl->tl_offset;
                tl->tl_head[t] = tn->tn_next[t];
                tn->tn_next[t] = NULL;
                tn->tn_member[t] = 0;
        }
        mutex_exit(&tl->tl_lock);

        return (p);
}
/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn, **tp;

        mutex_enter(&tl->tl_lock);

        for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
                if ((char *)tn - tl->tl_offset == p) {
                        *tp = tn->tn_next[t];
                        tn->tn_next[t] = NULL;
                        tn->tn_member[t] = 0;
                        mutex_exit(&tl->tl_lock);
                        return (p);
                }
        }

        mutex_exit(&tl->tl_lock);

        return (NULL);
}
int
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

        return (tn->tn_member[t]);
}
/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn = tl->tl_head[t];

        return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

        tn = tn->tn_next[t];

        return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
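
/*
 * Walk sketch (editorial; only safe while the list is not changing, per
 * the comment above, where process() stands in for a hypothetical
 * consumer):
 *
 *	void *p;
 *	for (p = txg_list_head(tl, txg); p != NULL;
 *	    p = txg_list_next(tl, p, txg))
 *		process(p);
 */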
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);

module_param(zfs_txg_timeout, int, 0644);
MODULE_PARM_DESC(zfs_txg_timeout, "Max seconds worth of delta per txg");
#endif