/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>
#include <sys/trace_txg.h>

/*
 * ZFS Transaction Groups
 * ----------------------
 *
 * ZFS transaction groups are, as the name implies, groups of transactions
 * that act on persistent state. ZFS asserts consistency at the granularity of
 * these transaction groups. Each successive transaction group (txg) is
 * assigned a 64-bit consecutive identifier. There are three active
 * transaction group states: open, quiescing, or syncing. At any given time,
 * there may be an active txg associated with each state; each active txg may
 * either be processing, or blocked waiting to enter the next state. There may
 * be up to three active txgs, and there is always a txg in the open state
 * (though it may be blocked waiting to enter the quiescing state). In broad
 * strokes, transactions -- operations that change in-memory structures -- are
 * accepted into the txg in the open state, and are completed while the txg is
 * in the open or quiescing states. The accumulated changes are written to
 * disk in the syncing state.
 *
 * Open
 *
 * When a new txg becomes active, it first enters the open state. New
 * transactions -- updates to in-memory structures -- are assigned to the
 * currently open txg. There is always a txg in the open state so that ZFS can
 * accept new changes (though the txg may refuse new changes if it has hit
 * some limit). ZFS advances the open txg to the next state for a variety of
 * reasons such as it hitting a time or size threshold, or the execution of an
 * administrative action that must be completed in the syncing state.
 *
 * Quiescing
 *
 * After a txg exits the open state, it enters the quiescing state. The
 * quiescing state is intended to provide a buffer between accepting new
 * transactions in the open state and writing them out to stable storage in
 * the syncing state. While quiescing, transactions can continue their
 * operation without delaying either of the other states. Typically, a txg is
 * in the quiescing state very briefly since the operations are bounded by
 * software latencies rather than, say, slower I/O latencies. After all
 * transactions complete, the txg is ready to enter the next state.
 *
 * Syncing
 *
 * In the syncing state, the in-memory state built up during the open and (to
 * a lesser degree) the quiescing states is written to stable storage. The
 * process of writing out modified data can, in turn, modify more data. For
 * example, when we write new blocks, we need to allocate space for them; those
 * allocations modify metadata (space maps)... which themselves must be
 * written to stable storage. During the sync state, ZFS iterates, writing out
 * data until it converges and all in-memory changes have been written out.
 * The first such pass is the largest as it encompasses all the modified user
 * data (as opposed to filesystem metadata). Subsequent passes typically have
 * far less data to write as they consist exclusively of filesystem metadata.
 *
 * To ensure convergence, after a certain number of passes ZFS begins
 * overwriting locations on stable storage that had been allocated earlier in
 * the syncing state (and subsequently freed). ZFS usually allocates new
 * blocks to optimize for large, continuous writes. For the syncing state to
 * converge, however, it must complete a pass where no new blocks are allocated
 * since each allocation requires a modification of persistent metadata.
 * Further, to hasten convergence, after a prescribed number of passes, ZFS
 * also defers frees, and stops compressing.
 *
 * In addition to writing out user data, we must also execute synctasks during
 * the syncing context. A synctask is the mechanism by which some
 * administrative activities work such as creating and destroying snapshots or
 * datasets. Note that when a synctask is initiated it enters the open txg,
 * and ZFS then pushes that txg as quickly as possible to completion of the
 * syncing state in order to reduce the latency of the administrative
 * activity. To complete the syncing state, ZFS writes out a new uberblock,
 * the root of the tree of blocks that comprise all state stored on the ZFS
 * pool. Finally, if there is a quiesced txg waiting, we signal that it can
 * now transition to the syncing state.
 */
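
/*
 * A minimal sketch of how a DMU consumer typically drives this pipeline,
 * assuming the standard dmu_tx interfaces; "os", "object", and "size" are
 * hypothetical placeholders:
 *
 *        dmu_tx_t *tx = dmu_tx_create(os);
 *        uint64_t txg;
 *        int err;
 *
 *        dmu_tx_hold_write(tx, object, 0, size);
 *        if ((err = dmu_tx_assign(tx, TXG_WAIT)) != 0) {
 *                dmu_tx_abort(tx);         // never joined a txg
 *                return (err);
 *        }
 *        txg = dmu_tx_get_txg(tx);         // the open txg we joined
 *        // ... modify in-memory structures under this tx ...
 *        dmu_tx_commit(tx);                // release the hold on txg
 *        txg_wait_synced(dp, txg);         // optional: wait for stability
 */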

static void txg_sync_thread(dsl_pool_t *dp);
static void txg_quiesce_thread(dsl_pool_t *dp);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_NOLOCKDEP,
		    NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_open_lock);
		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 0, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);	/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait_sig(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	else
		cv_wait_sig(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);

	/*
	 * We need to ensure that we've vacated the deferred space_maps.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc;
	uint64_t txg;

	/*
	 * The processor id is simply used as a "random" number with
	 * which to index into the tx_cpu array; there is no other
	 * significance to the chosen tx_cpu, so the current cpu is as
	 * good a choice as any.
	 */
	kpreempt_disable();
	tc = &tx->tx_cpu[CPU_SEQID];
	kpreempt_enable();

	mutex_enter(&tc->tc_open_lock);
	txg = tx->tx_open_txg;

	mutex_enter(&tc->tc_lock);
	tc->tc_count[txg & TXG_MASK]++;
	mutex_exit(&tc->tc_lock);

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	ASSERT(!MUTEX_HELD(&tc->tc_lock));
	mutex_exit(&tc->tc_open_lock);
}

void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}

void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}

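/*
 * A minimal sketch of the hold protocol implemented above, assuming a
 * caller outside the DMU; in practice dmu_tx_assign() and dmu_tx_commit()
 * perform these steps on a consumer's behalf:
 *
 *        txg_handle_t th;
 *        uint64_t txg;
 *
 *        txg = txg_hold_open(dp, &th);    // returns with tc_open_lock held
 *        txg_rele_to_quiesce(&th);        // drop tc_open_lock promptly
 *        // ... record changes against txg ...
 *        txg_rele_to_sync(&th);           // drop the count; txg may quiesce
 */
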
/*
 * Blocks until all transactions in the group are committed.
 *
 * On return, the transaction group has reached a stable state in which it can
 * then be passed off to the syncing context.
 */
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	uint64_t tx_open_time;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tc_open_locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_open_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;
	tx->tx_open_time = tx_open_time = gethrtime();

	DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
	DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_open_lock);

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time);
	spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time);

	/*
	 * Quiesce the transaction group by waiting for everyone to call
	 * txg_rele_to_sync().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
}

static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 *
 * If no callbacks are registered for a given TXG, nothing happens.
 * This function creates a taskq for the associated pool, if needed.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/*
		 * No need to lock tx_cpu_t at this point, since this can
		 * only be called once a txg has been synced.
		 */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    max_ncpus, defclsyspri, max_ncpus, max_ncpus * 2,
			    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}

/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
}

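/*
 * A brief consumer-side sketch, assuming the standard dmu_tx callback
 * API; "my_cb" and "my_data" are hypothetical. A callback registered on
 * an assigned tx is dispatched through the tx_commit_cb taskq above once
 * its txg has synced (error == 0), or with an error if the tx is aborted:
 *
 *        static void
 *        my_cb(void *data, int error)
 *        {
 *                // error == 0: the txg reached stable storage
 *        }
 *
 *        dmu_tx_callback_register(tx, my_cb, my_data);
 */
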
static void
txg_sync_thread(dsl_pool_t *dp)
{
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	vdev_stat_t *vs1, *vs2;
	clock_t start, delta;

	(void) spl_fstrans_mark();
	txg_thread_enter(tx, &cpr);

	vs1 = kmem_alloc(sizeof (vdev_stat_t), KM_SLEEP);
	vs2 = kmem_alloc(sizeof (vdev_stat_t), KM_SLEEP);

	start = delta = 0;
	for (;;) {
		clock_t timer, timeout;
		uint64_t txg;
		uint64_t ndirty;

		timeout = zfs_txg_timeout * hz;

		/*
		 * We sync when we're scanning, when there's someone waiting
		 * on us, when the quiesce thread has handed off a txg to us,
		 * or when we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0 &&
		    dp->dp_dirty_total < zfs_dirty_data_sync) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting) {
			kmem_free(vs2, sizeof (vdev_stat_t));
			kmem_free(vs1, sizeof (vdev_stat_t));
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);
		}

		spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
		vdev_get_stats(spa->spa_root_vdev, vs1);
		spa_config_exit(spa, SCL_ALL, FTAG);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		spa_txg_history_set(spa, txg, TXG_STATE_WAIT_FOR_SYNC,
		    gethrtime());
		ndirty = dp->dp_dirty_pertxg[txg & TXG_MASK];

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);

		spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
		vdev_get_stats(spa->spa_root_vdev, vs2);
		spa_config_exit(spa, SCL_ALL, FTAG);
		spa_txg_history_set_io(spa, txg,
		    vs2->vs_bytes[ZIO_TYPE_READ]-vs1->vs_bytes[ZIO_TYPE_READ],
		    vs2->vs_bytes[ZIO_TYPE_WRITE]-vs1->vs_bytes[ZIO_TYPE_WRITE],
		    vs2->vs_ops[ZIO_TYPE_READ]-vs1->vs_ops[ZIO_TYPE_READ],
		    vs2->vs_ops[ZIO_TYPE_WRITE]-vs1->vs_ops[ZIO_TYPE_WRITE],
		    ndirty);
		spa_txg_history_set(spa, txg, TXG_STATE_SYNCED, gethrtime());
	}
}

static void
txg_quiesce_thread(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

/*
 * Delay this thread by delay nanoseconds if we are still in the open
 * transaction group and there is already a waiting txg quiescing or quiesced.
 * Abort the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
{
	tx_state_t *tx = &dp->dp_tx;
	hrtime_t start = gethrtime();

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (gethrtime() - start < delay &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) {
		(void) cv_timedwait_hires(&tx->tx_quiesce_more_cv,
		    &tx->tx_sync_lock, delay, resolution, 0);
	}

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}

void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

/*
 * If there isn't a txg syncing or in the pipeline, push another txg through
 * the pipeline by quiescing the open txg.
 */
void
txg_kick(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_syncing_txg == 0 &&
	    tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
	    tx->tx_sync_txg_waiting <= tx->tx_synced_txg &&
	    tx->tx_quiesced_txg <= tx->tx_synced_txg) {
		tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
		cv_broadcast(&tx->tx_quiesce_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);
}

boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}

boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

/*
 * Returns true if all txg lists are empty.
 *
 * Warning: this is inherently racy (an item could be added immediately
 * after this function returns).  We don't bother with the lock because
 * it wouldn't change the semantics.
 */
boolean_t
txg_all_lists_empty(txg_list_t *tl)
{
	int i;

	for (i = 0; i < TXG_SIZE; i++) {
		if (!txg_list_empty(tl, i)) {
			return (B_FALSE);
		}
	}
	return (B_TRUE);
}

/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t] != 0);
}

/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
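
/*
 * An illustrative walk, assuming the list is stable (e.g. called from
 * syncing context); "tl" and "txg" stand in for a real list and txg:
 *
 *        void *p;
 *
 *        for (p = txg_list_head(tl, txg); p != NULL;
 *            p = txg_list_next(tl, p, txg)) {
 *                // process an object dirtied in txg
 *        }
 */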

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);

module_param(zfs_txg_timeout, int, 0644);
MODULE_PARM_DESC(zfs_txg_timeout, "Max seconds worth of delta per txg");
#endif