/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/zil.h>
#include <sys/callb.h>
#include <sys/trace_txg.h>

/*
 * ZFS Transaction Groups
 * ----------------------
 *
 * ZFS transaction groups are, as the name implies, groups of transactions
 * that act on persistent state. ZFS asserts consistency at the granularity of
 * these transaction groups. Each successive transaction group (txg) is
 * assigned a 64-bit consecutive identifier. There are three active
 * transaction group states: open, quiescing, or syncing. At any given time,
 * there may be an active txg associated with each state; each active txg may
 * either be processing, or blocked waiting to enter the next state. There may
 * be up to three active txgs, and there is always a txg in the open state
 * (though it may be blocked waiting to enter the quiescing state). In broad
 * strokes, transactions -- operations that change in-memory structures -- are
 * accepted into the txg in the open state, and are completed while the txg is
 * in the open or quiescing states. The accumulated changes are written to
 * disk in the syncing state.
 *
 * Open
 *
 * When a new txg becomes active, it first enters the open state. New
 * transactions -- updates to in-memory structures -- are assigned to the
 * currently open txg. There is always a txg in the open state so that ZFS can
 * accept new changes (though the txg may refuse new changes if it has hit
 * some limit). ZFS advances the open txg to the next state for a variety of
 * reasons, such as hitting a time or size threshold, or the execution of an
 * administrative action that must be completed in the syncing state.
 *
 * Quiescing
 *
 * After a txg exits the open state, it enters the quiescing state. The
 * quiescing state is intended to provide a buffer between accepting new
 * transactions in the open state and writing them out to stable storage in
 * the syncing state. While quiescing, transactions can continue their
 * operation without delaying either of the other states. Typically, a txg is
 * in the quiescing state very briefly since the operations are bounded by
 * software latencies rather than, say, slower I/O latencies. After all
 * transactions complete, the txg is ready to enter the next state.
 *
 * Syncing
 *
 * In the syncing state, the in-memory state built up during the open and (to
 * a lesser degree) the quiescing states is written to stable storage. The
 * process of writing out modified data can, in turn, modify more data. For
 * example, when we write new blocks, we need to allocate space for them;
 * those allocations modify metadata (space maps)... which themselves must be
 * written to stable storage. During the sync state, ZFS iterates, writing out
 * data until it converges and all in-memory changes have been written out.
 * The first such pass is the largest as it encompasses all the modified user
 * data (as opposed to filesystem metadata). Subsequent passes typically have
 * far less data to write as they consist exclusively of filesystem metadata.
 *
 * To ensure convergence, after a certain number of passes ZFS begins
 * overwriting locations on stable storage that had been allocated earlier in
 * the syncing state (and subsequently freed). ZFS usually allocates new
 * blocks to optimize for large, continuous writes. For the syncing state to
 * converge, however, it must complete a pass where no new blocks are
 * allocated, since each allocation requires a modification of persistent
 * metadata. Further, to hasten convergence, after a prescribed number of
 * passes, ZFS also defers frees, and stops compressing.
 *
 * In addition to writing out user data, we must also execute synctasks during
 * the syncing context. A synctask is the mechanism by which some
 * administrative activities work, such as creating and destroying snapshots
 * or datasets. Note that when a synctask is initiated, it enters the open
 * txg, and ZFS then pushes that txg as quickly as possible to completion of
 * the syncing state in order to reduce the latency of the administrative
 * activity. To complete the syncing state, ZFS writes out a new uberblock,
 * the root of the tree of blocks that comprise all state stored on the ZFS
 * pool. Finally, if there is a quiesced txg waiting, we signal that it can
 * now transition to the syncing state.
 */
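
/*
 * Illustrative sketch only (not part of this file's interfaces): a DMU
 * consumer typically drives a change through the states above roughly as
 * follows; error handling is omitted and the dmu_tx_hold_*() calls vary
 * by operation:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, length);
 *	error = dmu_tx_assign(tx, TXG_WAIT);	(joins the open txg)
 *	... modify in-memory structures ...
 *	dmu_tx_commit(tx);			(drops the hold on the txg)
 *	txg_wait_synced(dmu_objset_pool(os), 0); (optionally wait for sync)
 */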
110
111 static void txg_sync_thread(void *arg);
112 static void txg_quiesce_thread(void *arg);
113
114 int zfs_txg_timeout = 5; /* max seconds worth of delta per txg */
115
116 /*
117 * Prepare the txg subsystem.
118 */
119 void
120 txg_init(dsl_pool_t *dp, uint64_t txg)
121 {
122 tx_state_t *tx = &dp->dp_tx;
123 int c;
124 bzero(tx, sizeof (tx_state_t));
125
126 tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);
127
128 for (c = 0; c < max_ncpus; c++) {
129 int i;
130
131 mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
132 mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_NOLOCKDEP,
133 NULL);
134 for (i = 0; i < TXG_SIZE; i++) {
135 cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
136 NULL);
137 list_create(&tx->tx_cpu[c].tc_callbacks[i],
138 sizeof (dmu_tx_callback_t),
139 offsetof(dmu_tx_callback_t, dcb_node));
140 }
141 }
142
143 mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);
144
145 cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
146 cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
147 cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
148 cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
149 cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);
150
151 tx->tx_open_txg = txg;
152 }
153
154 /*
155 * Close down the txg subsystem.
156 */
157 void
158 txg_fini(dsl_pool_t *dp)
159 {
160 tx_state_t *tx = &dp->dp_tx;
161 int c;
162
163 ASSERT0(tx->tx_threads);
164
165 mutex_destroy(&tx->tx_sync_lock);
166
167 cv_destroy(&tx->tx_sync_more_cv);
168 cv_destroy(&tx->tx_sync_done_cv);
169 cv_destroy(&tx->tx_quiesce_more_cv);
170 cv_destroy(&tx->tx_quiesce_done_cv);
171 cv_destroy(&tx->tx_exit_cv);
172
173 for (c = 0; c < max_ncpus; c++) {
174 int i;
175
176 mutex_destroy(&tx->tx_cpu[c].tc_open_lock);
177 mutex_destroy(&tx->tx_cpu[c].tc_lock);
178 for (i = 0; i < TXG_SIZE; i++) {
179 cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
180 list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
181 }
182 }
183
184 if (tx->tx_commit_cb_taskq != NULL)
185 taskq_destroy(tx->tx_commit_cb_taskq);
186
187 vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));
188
189 bzero(tx, sizeof (tx_state_t));
190 }
191
192 /*
193 * Start syncing transaction groups.
194 */
195 void
196 txg_sync_start(dsl_pool_t *dp)
197 {
198 tx_state_t *tx = &dp->dp_tx;
199
200 mutex_enter(&tx->tx_sync_lock);
201
202 dprintf("pool %p\n", dp);
203
204 ASSERT0(tx->tx_threads);
205
206 tx->tx_threads = 2;
207
208 tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
209 dp, 0, &p0, TS_RUN, defclsyspri);
210
211 /*
212 * The sync thread can need a larger-than-default stack size on
213 * 32-bit x86. This is due in part to nested pools and
214 * scrub_visitbp() recursion.
215 */
216 tx->tx_sync_thread = thread_create(NULL, 0, txg_sync_thread,
217 dp, 0, &p0, TS_RUN, defclsyspri);
218
219 mutex_exit(&tx->tx_sync_lock);
220 }
221
222 static void
223 txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
224 {
225 CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
226 mutex_enter(&tx->tx_sync_lock);
227 }
228
229 static void
230 txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
231 {
232 ASSERT(*tpp != NULL);
233 *tpp = NULL;
234 tx->tx_threads--;
235 cv_broadcast(&tx->tx_exit_cv);
236 CALLB_CPR_EXIT(cpr); /* drops &tx->tx_sync_lock */
237 thread_exit();
238 }
239
240 static void
241 txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
242 {
243 CALLB_CPR_SAFE_BEGIN(cpr);
244
245 /*
246 * cv_wait_sig() is used instead of cv_wait() in order to prevent
247 * this process from incorrectly contributing to the system load
248 * average when idle.
249 */
250 if (time) {
251 (void) cv_timedwait_sig(cv, &tx->tx_sync_lock,
252 ddi_get_lbolt() + time);
253 } else {
254 cv_wait_sig(cv, &tx->tx_sync_lock);
255 }
256
257 CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
258 }
259
260 /*
261 * Stop syncing transaction groups.
262 */
263 void
264 txg_sync_stop(dsl_pool_t *dp)
265 {
266 tx_state_t *tx = &dp->dp_tx;
267
268 dprintf("pool %p\n", dp);
269 /*
270 * Finish off any work in progress.
271 */
272 ASSERT3U(tx->tx_threads, ==, 2);
273
274 /*
275 * We need to ensure that we've vacated the deferred space_maps.
276 */
277 txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);
278
279 /*
280 * Wake all sync threads and wait for them to die.
281 */
282 mutex_enter(&tx->tx_sync_lock);
283
284 ASSERT3U(tx->tx_threads, ==, 2);
285
286 tx->tx_exiting = 1;
287
288 cv_broadcast(&tx->tx_quiesce_more_cv);
289 cv_broadcast(&tx->tx_quiesce_done_cv);
290 cv_broadcast(&tx->tx_sync_more_cv);
291
292 while (tx->tx_threads != 0)
293 cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);
294
295 tx->tx_exiting = 0;
296
297 mutex_exit(&tx->tx_sync_lock);
298 }
299
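/*
 * Get a handle on the currently open txg and keep it from quiescing until
 * the handle is released: txg_rele_to_quiesce() drops the per-CPU open
 * lock, and txg_rele_to_sync() drops the count taken here. Returns the
 * txg number of the open txg.
 */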
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc;
	uint64_t txg;

	/*
	 * The processor id is used only as a convenient way to spread
	 * holds across the tx_cpu array; there is no other significance
	 * to the chosen tx_cpu, so the current cpu is as good an index
	 * as any.
	 */
	kpreempt_disable();
	tc = &tx->tx_cpu[CPU_SEQID];
	kpreempt_enable();

	mutex_enter(&tc->tc_open_lock);
	txg = tx->tx_open_txg;

	mutex_enter(&tc->tc_lock);
	tc->tc_count[txg & TXG_MASK]++;
	mutex_exit(&tc->tc_lock);

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

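/*
 * Release the per-CPU open lock taken in txg_hold_open(), allowing the
 * handle's txg to begin quiescing; the count taken there is still held
 * until txg_rele_to_sync().
 */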
void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	ASSERT(!MUTEX_HELD(&tc->tc_lock));
	mutex_exit(&tc->tc_open_lock);
}

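/*
 * Move the commit callbacks on tx_callbacks onto the per-CPU callback
 * list for this handle's txg; they are dispatched after that txg syncs.
 */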
void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}

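/*
 * Drop the count taken in txg_hold_open(). Once every holder of the txg
 * has released, the txg can finish quiescing.
 */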
void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}

/*
 * Blocks until all transactions in the group are committed.
 *
 * On return, the transaction group has reached a stable state in which it can
 * then be passed off to the syncing context.
 */
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	uint64_t tx_open_time;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tc_open_locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_open_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;
	tx->tx_open_time = tx_open_time = gethrtime();

	DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
	DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_open_lock);

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time);
	spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time);

	/*
	 * Quiesce the transaction group by waiting for everyone to txg_exit().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
}

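/*
 * Invoke the commit callbacks on cb_list (with error 0, i.e. the txg
 * synced successfully), then destroy and free the list itself.
 */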
static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 *
 * If no callbacks are registered for a given TXG, nothing happens.
 * This function creates a taskq for the associated pool, if needed.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/*
		 * No need to lock tx_cpu_t at this point, since this can
		 * only be called once a txg has been synced.
		 */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    max_ncpus, defclsyspri, max_ncpus, max_ncpus * 2,
			    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}

/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
}

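/*
 * Pipeline-state predicates; callers must hold tx_sync_lock.
 */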
static boolean_t
txg_is_syncing(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_syncing_txg != 0);
}

static boolean_t
txg_is_quiescing(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_quiescing_txg != 0);
}

static boolean_t
txg_has_quiesced_to_sync(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_quiesced_txg != 0);
}

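/*
 * The sync thread loops forever: it waits for a quiesced txg (prompting
 * the quiesce thread if necessary), syncs it to disk via spa_sync(), and
 * then dispatches that txg's commit callbacks.
 */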
static void
txg_sync_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	clock_t start, delta;

	(void) spl_fstrans_mark();
	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		clock_t timeout = zfs_txg_timeout * hz;
		clock_t timer;
		uint64_t txg;
		uint64_t dirty_min_bytes =
		    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    !txg_has_quiesced_to_sync(dp) &&
		    dp->dp_dirty_total < dirty_min_bytes) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && !txg_has_quiesced_to_sync(dp)) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us. This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		ASSERT(tx->tx_quiesced_txg != 0);
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		txg_stat_t *ts = spa_txg_history_init_io(spa, txg, dp);
		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;
		spa_txg_history_fini_io(spa, ts);

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}

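/*
 * The quiesce thread loops forever: it waits until a newer txg is wanted,
 * quiesces the open txg, and hands it off to the sync thread.
 */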
static void
txg_quiesce_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state. So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    txg_has_quiesced_to_sync(dp)))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		tx->tx_quiescing_txg = txg;

		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiescing_txg = 0;
		tx->tx_quiesced_txg = txg;
		DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

/*
 * Delay this thread by delay nanoseconds if we are still in the open
 * transaction group and there is already a waiting txg quiescing or quiesced.
 * Abort the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
{
	tx_state_t *tx = &dp->dp_tx;
	hrtime_t start = gethrtime();

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (gethrtime() - start < delay &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) {
		(void) cv_timedwait_hires(&tx->tx_quiesce_more_cv,
		    &tx->tx_sync_lock, delay, resolution, 0);
	}

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}

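/*
 * Block until the given txg has been synced to disk. A txg of 0 means
 * tx_open_txg + TXG_DEFER_SIZE, which also covers the deferred frees.
 */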
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT3U(tx->tx_threads, ==, 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait_io(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

/*
 * Wait for the specified open transaction group. Set should_quiesce
 * when the current open txg should be quiesced immediately.
 */
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT3U(tx->tx_threads, ==, 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg && should_quiesce)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		/*
		 * Callers setting should_quiesce will use cv_wait_io() and
		 * be accounted for as iowait time. Otherwise, the caller is
		 * understood to be idle and cv_wait_sig() is used to prevent
		 * incorrectly inflating the system load average.
		 */
		if (should_quiesce == B_TRUE) {
			cv_wait_io(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
		} else {
			cv_wait_sig(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
		}
	}
	mutex_exit(&tx->tx_sync_lock);
}

/*
 * If there isn't a txg syncing or in the pipeline, push another txg through
 * the pipeline by quiescing the open txg.
 */
void
txg_kick(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	if (!txg_is_syncing(dp) &&
	    !txg_is_quiescing(dp) &&
	    tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
	    tx->tx_sync_txg_waiting <= tx->tx_synced_txg &&
	    tx->tx_quiesced_txg <= tx->tx_synced_txg) {
		tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
		cv_broadcast(&tx->tx_quiesce_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);
}

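/*
 * Report whether a txg is wanted beyond the currently open one, i.e. the
 * pipeline is stalled waiting for the open txg to quiesce.
 */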
boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

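/*
 * Report whether the sync thread has outstanding work: either a sync is
 * wanted at or beyond the txg now syncing, or a quiesced txg is waiting
 * to be consumed.
 */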
boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Verify that this txg is active (open, quiescing, syncing). Non-active
 * txgs should not be manipulated.
 */
#ifdef ZFS_DEBUG
void
txg_verify(spa_t *spa, uint64_t txg)
{
	ASSERTV(dsl_pool_t *dp = spa_get_dsl(spa));
	if (txg <= TXG_INITIAL || txg == ZILTEST_TXG)
		return;
	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_synced_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_open_txg - TXG_CONCURRENT_STATES);
}
#endif

/*
 * Per-txg object lists. Since at most TXG_SIZE txgs can be active at once,
 * each list is really a ring of TXG_SIZE sub-lists indexed by txg & TXG_MASK.
 */
void
txg_list_create(txg_list_t *tl, spa_t *spa, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;
	tl->tl_spa = spa;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

static boolean_t
txg_list_empty_impl(txg_list_t *tl, uint64_t txg)
{
	ASSERT(MUTEX_HELD(&tl->tl_lock));
	TXG_VERIFY(tl->tl_spa, txg);
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	mutex_enter(&tl->tl_lock);
	boolean_t ret = txg_list_empty_impl(tl, txg);
	mutex_exit(&tl->tl_lock);

	return (ret);
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	mutex_enter(&tl->tl_lock);
	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty_impl(tl, t));
	mutex_exit(&tl->tl_lock);

	mutex_destroy(&tl->tl_lock);
}

/*
 * Returns true if all txg lists are empty.
 *
 * Warning: this is inherently racy (an item could be added immediately
 * after this function returns).
 */
boolean_t
txg_all_lists_empty(txg_list_t *tl)
{
	mutex_enter(&tl->tl_lock);
	for (int i = 0; i < TXG_SIZE; i++) {
		if (!txg_list_empty_impl(tl, i)) {
			mutex_exit(&tl->tl_lock);
			return (B_FALSE);
		}
	}
	mutex_exit(&tl->tl_lock);
	return (B_TRUE);
}

/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		ASSERT(tn->tn_member[t]);
		ASSERT(tn->tn_next[t] == NULL || tn->tn_next[t]->tn_member[t]);
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

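/*
 * Report whether p is currently on the list for the given txg.
 */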
boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	TXG_VERIFY(tl->tl_spa, txg);
	return (tn->tn_member[t] != 0);
}

/*
 * Walk a txg list
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;

	mutex_enter(&tl->tl_lock);
	tn = tl->tl_head[t];
	mutex_exit(&tl->tl_lock);

	TXG_VERIFY(tl->tl_spa, txg);
	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	TXG_VERIFY(tl->tl_spa, txg);

	mutex_enter(&tl->tl_lock);
	tn = tn->tn_next[t];
	mutex_exit(&tl->tl_lock);

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

#if defined(_KERNEL)
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);

module_param(zfs_txg_timeout, int, 0644);
MODULE_PARM_DESC(zfs_txg_timeout, "Max seconds worth of delta per txg");
#endif