/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/dsl_pool.h>
#include <sys/callb.h>

/*
 * Pool-wide transaction groups.
 */

static void txg_sync_thread(dsl_pool_t *dp);
static void txg_quiesce_thread(dsl_pool_t *dp);

int zfs_txg_timeout = 30;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = kmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
		}
	}

	rw_init(&tx->tx_suspend, NULL, RW_DEFAULT, NULL);
	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	rw_destroy(&tx->tx_suspend);
	mutex_destroy(&tx->tx_sync_lock);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++)
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
	}

	kmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 12<<10, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

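/*
 * Helpers for the sync and quiesce threads: enter/exit handle CPR
 * (suspend/resume) registration and tx_sync_lock, and wait blocks
 * CPR-safely on a condition variable, with an optional timeout in
 * ticks.
 */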
static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait(cv, &tx->tx_sync_lock, lbolt + time);
	else
		cv_wait(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);
	txg_wait_synced(dp, 0);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

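/*
 * Get a handle on the currently open txg and keep it open until the
 * hold is released.  The per-cpu lock taken here is dropped by
 * txg_rele_to_quiesce(); the hold itself is dropped by
 * txg_rele_to_sync().  A minimal illustrative sequence (not taken
 * verbatim from any caller):
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);
 *	txg_rele_to_quiesce(&th);
 *	...dirty data in this txg...
 *	txg_rele_to_sync(&th);
 */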
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc = &tx->tx_cpu[CPU_SEQID];
	uint64_t txg;

	mutex_enter(&tc->tc_lock);

	txg = tx->tx_open_txg;
	tc->tc_count[txg & TXG_MASK]++;

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

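/*
 * Drop the per-cpu lock acquired by txg_hold_open() so the quiesce
 * thread can make progress, while still keeping our hold on the txg.
 */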
void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	mutex_exit(&tc->tc_lock);
}

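/*
 * Release our hold on the txg; once every holder has done so, the
 * txg can finish quiescing and be handed to the sync thread.
 */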
void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}

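/*
 * Quiesce 'txg': advance tx_open_txg so no new holds can enter it,
 * then wait for every existing hold to be released via
 * txg_rele_to_sync().
 */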
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tx_cpu locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_lock);

	/*
	 * Quiesce the transaction group by waiting for everyone to txg_exit().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}
}

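/*
 * Sync thread: waits for a quiesced txg (or for a reason to sync
 * early), then runs spa_sync() on it and records it as synced.
 */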
static void
txg_sync_thread(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	uint64_t start, delta;

	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		uint64_t timer, timeout = zfs_txg_timeout * hz;
		uint64_t txg;

		/*
		 * We sync when we're scrubbing, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while ((dp->dp_scrub_func == SCRUB_FUNC_NONE ||
		    spa_shutting_down(dp->dp_spa)) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = lbolt - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		rw_enter(&tx->tx_suspend, RW_WRITER);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);
		rw_exit(&tx->tx_suspend);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		start = lbolt;
		spa_sync(dp->dp_spa, txg);
		delta = lbolt - start;

		mutex_enter(&tx->tx_sync_lock);
		rw_enter(&tx->tx_suspend, RW_WRITER);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		rw_exit(&tx->tx_suspend);
		cv_broadcast(&tx->tx_sync_done_cv);
	}
}

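/*
 * Quiesce thread: takes the open txg, quiesces it, and hands it off
 * to the sync thread.  Only one txg may be quiescing or quiesced
 * (awaiting sync) at a time.
 */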
static void
txg_quiesce_thread(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

/*
 * Delay this thread by 'ticks' if we are still in the open transaction
 * group and there is already a waiting txg quiescing or quiesced.  Abort
 * the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
	tx_state_t *tx = &dp->dp_tx;
	int timeout = lbolt + ticks;

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (lbolt < timeout &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
		(void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
		    timeout);

	mutex_exit(&tx->tx_sync_lock);
}

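/*
 * Block until 'txg' (or, if zero, the currently open txg) has been
 * synced to disk, nudging the sync thread along as needed.
 */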
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

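/*
 * Block until 'txg' (or, if zero, one past the currently open txg)
 * becomes the open txg, prompting the quiesce thread as needed.
 */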
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

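/*
 * Returns B_TRUE if a newer txg has been requested but the open txg
 * has not yet quiesced to make way for it.
 */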
boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

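/*
 * Returns B_TRUE if the sync thread has work pending: a sync has
 * been requested or a quiesced txg is awaiting sync.
 */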
boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

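/*
 * Suspend the sync pipeline: while tx_suspend is held as reader, the
 * sync thread cannot take it as writer to hand off or complete a txg.
 * txg_resume() releases the hold.
 */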
void
txg_suspend(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	/* XXX some code paths suspend when they are already suspended! */
	rw_enter(&tx->tx_suspend, RW_READER);
}

void
txg_resume(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	rw_exit(&tx->tx_suspend);
}

/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

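/*
 * Destroy a txg list; all TXG_SIZE sub-lists must already be empty.
 */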
void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}

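/*
 * Returns nonzero if the list for 'txg' has no entries.
 */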
int
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

/*
 * Add an entry to the list.
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	int already_on_list;

	mutex_enter(&tl->tl_lock);
	already_on_list = tn->tn_member[t];
	if (!already_on_list) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (already_on_list);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

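/*
 * Returns nonzero if 'p' is on the list for 'txg'.
 */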
int
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t]);
}

/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}