/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>

/*
 * Pool-wide transaction groups.
 */

static void txg_sync_thread(dsl_pool_t *dp);
static void txg_quiesce_thread(dsl_pool_t *dp);
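
/*
 * A txg passes through three states: open (accepting new dmu_tx_t
 * assignments), quiescing (waiting for existing holds to be released),
 * and syncing (being written out by spa_sync()).  At most one txg can
 * be in each state at a time, so indexing per-txg state by
 * (txg & TXG_MASK) into TXG_SIZE slots is enough to keep every
 * in-flight group distinct.
 */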

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}
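
/*
 * A minimal bring-up/tear-down sketch (illustrative only; in the real
 * code the dsl_pool and spa open/close paths drive these calls):
 *
 *	txg_init(dp, initial_txg);	allocate per-CPU txg state
 *	txg_sync_start(dp);		spawn the sync and quiesce threads
 *	...pool is live...
 *	txg_sync_stop(dp);		flush remaining txgs, reap threads
 *	txg_fini(dp);			free per-CPU txg state
 */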

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 32<<10, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait_interruptible(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	else
		cv_wait_interruptible(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);

	/*
	 * We need to ensure that we've vacated the deferred space_maps.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc = &tx->tx_cpu[CPU_SEQID];
	uint64_t txg;

	mutex_enter(&tc->tc_lock);

	txg = tx->tx_open_txg;
	tc->tc_count[txg & TXG_MASK]++;

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	mutex_exit(&tc->tc_lock);
}

void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}

void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}
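
/*
 * Illustrative hold/release handshake as a consumer sees it (in the
 * real code dmu_tx_assign() takes the hold and dmu_tx_commit()
 * releases it):
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);	join the open txg;
 *						tc_lock is held on return
 *	txg_rele_to_quiesce(&th);		drop tc_lock, keep the hold
 *	...modify data assigned to txg in open context...
 *	txg_rele_to_sync(&th);			drop the hold; the last one
 *						out wakes txg_quiesce()
 */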

static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tx_cpu locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_lock);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * call txg_rele_to_sync().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}
}
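
/*
 * The function above is a two-phase barrier: holding every tc_lock
 * while bumping tx_open_txg guarantees that no new hold can join the
 * old txg, and each per-CPU tc_count[g] then drains to zero as the
 * remaining holders call txg_rele_to_sync().  Only after both phases
 * complete is the txg truly quiesced.
 */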

static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/* No need to lock tx_cpu_t at this point */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    100, minclsyspri, max_ncpus, INT_MAX,
			    TASKQ_THREADS_CPU_PCT | TASKQ_PREPOPULATE);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}
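
/*
 * Commit callbacks are registered against an open dmu_tx_t and fire
 * here only after the txg they belong to is on stable storage.  A
 * hedged sketch of consumer registration (my_commit_cb and my_arg are
 * hypothetical; dmu_tx_callback_register() is the real entry point):
 *
 *	static void
 *	my_commit_cb(void *arg, int error)
 *	{
 *		... error is nonzero if the tx was aborted ...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, my_arg);
 */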

/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait(tx->tx_commit_cb_taskq);
}

static void
txg_sync_thread(dsl_pool_t *dp)
{
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	uint64_t start, delta;

#ifdef _KERNEL
	/*
	 * Disable the normal reclaim path for the txg_sync thread.  This
	 * ensures the thread will never enter dmu_tx_assign() which can
	 * otherwise occur due to direct reclaim.  If this is allowed to
	 * happen the system can deadlock.  Direct reclaim call path:
	 *
	 *	->shrink_icache_memory->prune_icache->dispose_list->
	 *	clear_inode->zpl_clear_inode->zfs_inactive->dmu_tx_assign
	 */
	current->flags |= PF_MEMALLOC;
#endif /* _KERNEL */

	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		uint64_t timer, timeout = zfs_txg_timeout * hz;
		uint64_t txg;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}

static void
txg_quiesce_thread(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}
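
/*
 * The sync and quiesce threads above form a two-stage pipeline coupled
 * by four condition variables:
 *
 *	tx_sync_more_cv		waiters/quiesce -> sync: work available
 *	tx_sync_done_cv		sync -> waiters: txg is on disk
 *	tx_quiesce_more_cv	sync/waiters -> quiesce: open a new txg
 *	tx_quiesce_done_cv	quiesce -> sync/waiters: txg handed off
 */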

/*
 * Delay this thread by 'ticks' if we are still in the open transaction
 * group and there is already a waiting txg quiescing or quiesced.  Abort
 * the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
	tx_state_t *tx = &dp->dp_tx;
	int timeout = ddi_get_lbolt() + ticks;

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (ddi_get_lbolt() < timeout &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
		(void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
		    timeout);

	mutex_exit(&tx->tx_sync_lock);
}
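
/*
 * Illustrative use: the write throttle nudges an overactive writer to
 * back off for a tick while the pipeline catches up, roughly:
 *
 *	txg_delay(dp, tx->tx_txg, 1);
 *
 * (see dsl_pool_tempreserve_space() for the real caller).
 */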

void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}
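
/*
 * Passing txg == 0 above waits for everything currently open, plus the
 * deferred-free window; e.g. administrative operations that must be
 * durable before returning to the user simply do:
 *
 *	txg_wait_synced(dp, 0);
 */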

void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}

int
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

/*
 * Add an entry to the list.
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	int already_on_list;

	mutex_enter(&tl->tl_lock);
	already_on_list = tn->tn_member[t];
	if (!already_on_list) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (already_on_list);
}
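
/*
 * Typical producer/consumer pattern for these lists (a sketch; the
 * dp_dirty_datasets list in dsl_pool.c works this way):
 *
 *	open context:
 *		(void) txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg);
 *
 *	syncing context:
 *		while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))
 *		    != NULL)
 *			dsl_dataset_sync(ds, zio, tx);
 */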

/*
 * Add an entry to the end of the list (walks list to find end).
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	int already_on_list;

	mutex_enter(&tl->tl_lock);
	already_on_list = tn->tn_member[t];
	if (!already_on_list) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (already_on_list);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

int
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t]);
}

/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
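
/*
 * An illustrative walk, only valid while the list cannot change (e.g.
 * with the relevant txg in syncing context):
 *
 *	void *p;
 *
 *	for (p = txg_list_head(tl, txg); p != NULL;
 *	    p = txg_list_next(tl, p, txg))
 *		...visit p...
 */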

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);
#endif