/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2018 Datto Inc.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/wmsum.h>

/*
 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
 * calls that change the file system. Each itx has enough information to
 * be able to replay them after a system crash, power loss, or
 * equivalent failure mode. These are stored in memory until either:
 *
 *   1. they are committed to the pool by the DMU transaction group
 *      (txg), at which point they can be discarded; or
 *   2. they are committed to the on-disk ZIL for the dataset being
 *      modified (e.g. due to an fsync, O_DSYNC, or other synchronous
 *      requirement).
 *
 * In the event of a crash or power loss, the itxs contained by each
 * dataset's on-disk ZIL will be replayed when that dataset is first
 * instantiated (e.g. if the dataset is a normal filesystem, when it is
 * first mounted).
 *
 * As hinted at above, there is one ZIL per dataset (both the in-memory
 * representation, and the on-disk representation). The on-disk format
 * consists of 3 parts:
 *
 *	- a single, per-dataset, ZIL header; which points to a chain of
 *	- zero or more ZIL blocks; each of which contains
 *	- zero or more ZIL records
 *
 * A ZIL record holds the information necessary to replay a single
 * system call transaction. A ZIL block can hold many ZIL records, and
 * the blocks are chained together, similarly to a singly linked list.
 *
 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
 * block in the chain, and the ZIL header points to the first block in
 * the chain.
 *
 * Note, there is not a fixed place in the pool to hold these ZIL
 * blocks; they are dynamically allocated and freed as needed from the
 * blocks available on the pool, though they can be preferentially
 * allocated from a dedicated "log" vdev.
 */
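
/*
 * To make the chain layout described above concrete, here is a minimal
 * sketch (an editorial illustration, not upstream code) of how a reader
 * follows the chain; zil_parse() below is the real implementation:
 *
 *	blkptr_t blk = zh->zh_log;	// header points at the first block
 *	while (!BP_IS_HOLE(&blk)) {
 *		// read and checksum-verify the block, process its
 *		// records, then step to the embedded "next" pointer
 *		blk = zilc->zc_next_blk;
 *	}
 */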

/*
 * This controls the amount of time that a ZIL block (lwb) will remain
 * "open" when it isn't "full", and it has a thread waiting for it to be
 * committed to stable storage. Please refer to the zil_commit_waiter()
 * function (and the comments within it) for more details.
 */
static uint_t zfs_commit_timeout_pct = 10;

/*
 * See zil.h for more information about these fields.
 */
static zil_kstat_values_t zil_stats = {
	{ "zil_commit_count", KSTAT_DATA_UINT64 },
	{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_write", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_alloc", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_write", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_alloc", KSTAT_DATA_UINT64 },
};

static zil_sums_t zil_sums_global;
static kstat_t *zil_kstats_global;

/*
 * Disable intent logging replay. This global ZIL switch affects all pools.
 */
int zil_replay_disable = 0;

/*
 * Disable the DKIOCFLUSHWRITECACHE commands that are normally sent to
 * the disk(s) by the ZIL after an LWB write has completed. Setting this
 * will cause ZIL corruption on power loss if a volatile out-of-order
 * write cache is enabled.
 */
static int zil_nocacheflush = 0;

/*
 * Limit SLOG write size per commit executed with synchronous priority.
 * Any writes above that will be executed with lower (asynchronous) priority
 * to limit potential SLOG device abuse by a single active ZIL writer.
 */
static uint64_t zil_slog_bulk = 64 * 1024 * 1024;

static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;

static void zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
static itx_t *zil_itx_clone(itx_t *oitx);
static uint64_t zil_max_waste_space(zilog_t *zilog);

static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
	if (likely(cmp))
		return (cmp);

	return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}

static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}

int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva;
	zil_bp_node_t *zn;
	avl_index_t where;

	if (BP_IS_EMBEDDED(bp))
		return (0);

	dva = BP_IDENTITY(bp);

	if (avl_find(t, dva, &where) != NULL)
		return (SET_ERROR(EEXIST));

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}
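
/*
 * Usage note (editorial sketch, not upstream code): callers treat a
 * nonzero (EEXIST) return from zil_bp_tree_add() as "block already
 * seen", which is how the claim/free passes below avoid processing a
 * block twice, e.g.:
 *
 *	if (zil_bp_tree_add(zilog, bp) != 0)
 *		return (0);	// already claimed or freed once
 */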

static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0],
	    sizeof (zc->zc_word[ZIL_ZC_GUID_0]));
	(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1],
	    sizeof (zc->zc_word[ZIL_ZC_GUID_1]));
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

static int
zil_kstats_global_update(kstat_t *ksp, int rw)
{
	zil_kstat_values_t *zs = ksp->ks_data;
	ASSERT3P(&zil_stats, ==, zs);

	if (rw == KSTAT_WRITE) {
		return (SET_ERROR(EACCES));
	}

	zil_kstat_values_update(zs, &zil_sums_global);

	return (0);
}

/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
    blkptr_t *nbp, char **begin, char **end, arc_buf_t **abuf)
{
	zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	zbookmark_phys_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	if (!decrypt)
		zio_flags |= ZIO_FLAG_RAW;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
	    abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		uint64_t size = BP_GET_LSIZE(bp);
		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = (*abuf)->b_data;
			char *lr = (char *)(zilc + 1);

			if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) ||
			    zilc->zc_nused < sizeof (*zilc) ||
			    zilc->zc_nused > size) {
				error = SET_ERROR(ECKSUM);
			} else {
				*begin = lr;
				*end = lr + zilc->zc_nused - sizeof (*zilc);
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = (*abuf)->b_data;
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = SET_ERROR(ECKSUM);
			} else {
				*begin = lr;
				*end = lr + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}
	}

	return (error);
}
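
/*
 * Layout note (editorial sketch, not upstream code): with
 * ZIO_CHECKSUM_ZILOG2 ("slim" ZIL) the zil_chain_t sits at the start of
 * the block and the records follow it:
 *
 *	+-----------+------------------------------+
 *	| zil_chain | log records ...              |
 *	+-----------+------------------------------+
 *
 * whereas the older format places it at the very end of the block:
 *
 *	+------------------------------+-----------+
 *	| log records ...              | zil_chain |
 *	+------------------------------+-----------+
 */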

/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			memset(wbuf, 0, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	/*
	 * If we are not using the resulting data, we are just checking that
	 * it hasn't been corrupted so we don't need to waste CPU time
	 * decompressing and decrypting it.
	 */
	if (wbuf == NULL)
		zio_flags |= ZIO_FLAG_RAW;

	ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			memcpy(wbuf, abuf->b_data, arc_buf_size(abuf));
		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}

void
zil_sums_init(zil_sums_t *zs)
{
	wmsum_init(&zs->zil_commit_count, 0);
	wmsum_init(&zs->zil_commit_writer_count, 0);
	wmsum_init(&zs->zil_itx_count, 0);
	wmsum_init(&zs->zil_itx_indirect_count, 0);
	wmsum_init(&zs->zil_itx_indirect_bytes, 0);
	wmsum_init(&zs->zil_itx_copied_count, 0);
	wmsum_init(&zs->zil_itx_copied_bytes, 0);
	wmsum_init(&zs->zil_itx_needcopy_count, 0);
	wmsum_init(&zs->zil_itx_needcopy_bytes, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_count, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_bytes, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_write, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_alloc, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_count, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_bytes, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_write, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_alloc, 0);
}

void
zil_sums_fini(zil_sums_t *zs)
{
	wmsum_fini(&zs->zil_commit_count);
	wmsum_fini(&zs->zil_commit_writer_count);
	wmsum_fini(&zs->zil_itx_count);
	wmsum_fini(&zs->zil_itx_indirect_count);
	wmsum_fini(&zs->zil_itx_indirect_bytes);
	wmsum_fini(&zs->zil_itx_copied_count);
	wmsum_fini(&zs->zil_itx_copied_bytes);
	wmsum_fini(&zs->zil_itx_needcopy_count);
	wmsum_fini(&zs->zil_itx_needcopy_bytes);
	wmsum_fini(&zs->zil_itx_metaslab_normal_count);
	wmsum_fini(&zs->zil_itx_metaslab_normal_bytes);
	wmsum_fini(&zs->zil_itx_metaslab_normal_write);
	wmsum_fini(&zs->zil_itx_metaslab_normal_alloc);
	wmsum_fini(&zs->zil_itx_metaslab_slog_count);
	wmsum_fini(&zs->zil_itx_metaslab_slog_bytes);
	wmsum_fini(&zs->zil_itx_metaslab_slog_write);
	wmsum_fini(&zs->zil_itx_metaslab_slog_alloc);
}

void
zil_kstat_values_update(zil_kstat_values_t *zs, zil_sums_t *zil_sums)
{
	zs->zil_commit_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_commit_count);
	zs->zil_commit_writer_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_commit_writer_count);
	zs->zil_itx_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_count);
	zs->zil_itx_indirect_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_indirect_count);
	zs->zil_itx_indirect_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_indirect_bytes);
	zs->zil_itx_copied_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_copied_count);
	zs->zil_itx_copied_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_copied_bytes);
	zs->zil_itx_needcopy_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_needcopy_count);
	zs->zil_itx_needcopy_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_needcopy_bytes);
	zs->zil_itx_metaslab_normal_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_count);
	zs->zil_itx_metaslab_normal_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_bytes);
	zs->zil_itx_metaslab_normal_write.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_write);
	zs->zil_itx_metaslab_normal_alloc.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_alloc);
	zs->zil_itx_metaslab_slog_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_count);
	zs->zil_itx_metaslab_slog_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_bytes);
	zs->zil_itx_metaslab_slog_write.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_write);
	zs->zil_itx_metaslab_slog_alloc.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_alloc);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
    boolean_t decrypt)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk = {{{{0}}}};
	int error = 0;

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *lrp, *end;
		arc_buf_t *abuf = NULL;

		if (blk_seq > claim_blk_seq)
			break;

		error = parse_blk_func(zilog, &blk, arg, txg);
		if (error != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
		    &lrp, &end, &abuf);
		if (error != 0) {
			if (abuf)
				arc_buf_destroy(abuf, &abuf);
			if (claimed) {
				char name[ZFS_MAX_DATASET_NAME_LEN];

				dmu_objset_name(zilog->zl_os, name);

				cmn_err(CE_WARN, "ZFS read log block error %d, "
				    "dataset %s, seq 0x%llx\n", error, name,
				    (u_longlong_t)blk_seq);
			}
			break;
		}

		for (; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			ASSERT3U(reclen, <=, end - lrp);
			if (lr->lrc_seq > claim_lr_seq) {
				arc_buf_destroy(abuf, &abuf);
				goto done;
			}

			error = parse_lr_func(zilog, lr, arg, txg);
			if (error != 0) {
				arc_buf_destroy(abuf, &abuf);
				goto done;
			}
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
		arc_buf_destroy(abuf, &abuf);
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	zil_bp_tree_fini(zilog);

	return (error);
}
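
/*
 * Usage sketch (editorial, not upstream code): callers pair zil_parse()
 * with matching block/record callbacks; for example, the claim pass in
 * zil_claim() further below is driven as:
 *
 *	(void) zil_parse(zilog, zil_claim_log_block,
 *	    zil_claim_log_record, tx, first_txg, B_FALSE);
 */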

static int
zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t first_txg)
{
	(void) tx;
	ASSERT(!BP_IS_HOLE(bp));

	/*
	 * As we call this function from the context of a rewind to a
	 * checkpoint, each ZIL block whose txg is later than the txg
	 * that we rewind to is invalid. Thus, we return -1 so
	 * zil_parse() doesn't attempt to read it.
	 */
	if (BP_GET_LOGICAL_BIRTH(bp) >= first_txg)
		return (-1);

	if (zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	zio_free(zilog->zl_spa, first_txg, bp);
	return (0);
}

static int
zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t first_txg)
{
	(void) zilog, (void) lrc, (void) tx, (void) first_txg;
	return (0);
}

static int
zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (BP_IS_HOLE(bp) || BP_GET_LOGICAL_BIRTH(bp) < first_txg ||
	    zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}

static int
zil_claim_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));

	/*
	 * If the block is not readable, don't claim it. This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to. In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (BP_GET_LOGICAL_BIRTH(&lr->lr_blkptr) >= first_txg) {
		error = zil_read_log_data(zilog, lr, NULL);
		if (error != 0)
			return (error);
	}

	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}

static int
zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t first_txg)
{
	const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
	const blkptr_t *bp;
	spa_t *spa = zilog->zl_spa;
	uint_t ii;

	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
	ASSERT3U(lrc->lrc_reclen, >=, offsetof(lr_clone_range_t,
	    lr_bps[lr->lr_nbps]));

	if (tx == NULL) {
		return (0);
	}

	/*
	 * XXX: Do we need to byteswap lr?
	 */

	for (ii = 0; ii < lr->lr_nbps; ii++) {
		bp = &lr->lr_bps[ii];

		/*
		 * When data is embedded into the BP there is no need to
		 * create a BRT entry as there is no data block. Just copy
		 * the BP as it contains the data.
		 */
		if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
			continue;

		/*
		 * We cannot handle block pointers from the future, since
		 * they are not yet allocated. It should not normally
		 * happen, but just in case let's be safe and just stop here
		 * now instead of corrupting the pool.
		 */
		if (BP_GET_BIRTH(bp) >= first_txg)
			return (SET_ERROR(ENOENT));

		/*
		 * Assert the block is really allocated before we reference it.
		 */
		metaslab_check_free(spa, bp);
	}

	for (ii = 0; ii < lr->lr_nbps; ii++) {
		bp = &lr->lr_bps[ii];
		if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp))
			brt_pending_add(spa, bp, tx);
	}

	return (0);
}

static int
zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t first_txg)
{

	switch (lrc->lrc_txtype) {
	case TX_WRITE:
		return (zil_claim_write(zilog, lrc, tx, first_txg));
	case TX_CLONE_RANGE:
		return (zil_claim_clone_range(zilog, lrc, tx, first_txg));
	default:
		return (0);
	}
}

static int
zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t claim_txg)
{
	(void) claim_txg;

	zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_free_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (BP_GET_LOGICAL_BIRTH(bp) >= claim_txg &&
	    zil_bp_tree_add(zilog, bp) == 0 && !BP_IS_HOLE(bp)) {
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
	}

	return (0);
}

static int
zil_free_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
{
	const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
	const blkptr_t *bp;
	spa_t *spa;
	uint_t ii;

	ASSERT3U(lrc->lrc_reclen, >=, sizeof (*lr));
	ASSERT3U(lrc->lrc_reclen, >=, offsetof(lr_clone_range_t,
	    lr_bps[lr->lr_nbps]));

	if (tx == NULL) {
		return (0);
	}

	spa = zilog->zl_spa;

	for (ii = 0; ii < lr->lr_nbps; ii++) {
		bp = &lr->lr_bps[ii];

		if (!BP_IS_HOLE(bp)) {
			zio_free(spa, dmu_tx_get_txg(tx), bp);
		}
	}

	return (0);
}

static int
zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t claim_txg)
{

	if (claim_txg == 0) {
		return (0);
	}

	switch (lrc->lrc_txtype) {
	case TX_WRITE:
		return (zil_free_write(zilog, lrc, tx, claim_txg));
	case TX_CLONE_RANGE:
		return (zil_free_clone_range(zilog, lrc, tx));
	default:
		return (0);
	}
}

static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	return (TREE_CMP(v1, v2));
}

/*
 * Allocate a new lwb. We may already have a block pointer for it, in which
 * case we get size and version from there. Or we may not yet, in which case
 * we choose them here and later make the block allocation match.
 */
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog,
    uint64_t txg, lwb_state_t state)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	if (bp) {
		lwb->lwb_blk = *bp;
		lwb->lwb_slim = (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2);
		sz = BP_GET_LSIZE(bp);
	} else {
		BP_ZERO(&lwb->lwb_blk);
		lwb->lwb_slim = (spa_version(zilog->zl_spa) >=
		    SPA_VERSION_SLIM_ZIL);
	}
	lwb->lwb_slog = slog;
	lwb->lwb_error = 0;
	if (lwb->lwb_slim) {
		lwb->lwb_nmax = sz;
		lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
	} else {
		lwb->lwb_nmax = sz - sizeof (zil_chain_t);
		lwb->lwb_nused = lwb->lwb_nfilled = 0;
	}
	lwb->lwb_sz = sz;
	lwb->lwb_state = state;
	lwb->lwb_buf = zio_buf_alloc(sz);
	lwb->lwb_child_zio = NULL;
	lwb->lwb_write_zio = NULL;
	lwb->lwb_root_zio = NULL;
	lwb->lwb_issued_timestamp = 0;
	lwb->lwb_issued_txg = 0;
	lwb->lwb_alloc_txg = txg;
	lwb->lwb_max_txg = 0;

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	if (state != LWB_STATE_NEW)
		zilog->zl_last_lwb_opened = lwb;
	mutex_exit(&zilog->zl_lock);

	return (lwb);
}

static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
	ASSERT(MUTEX_HELD(&zilog->zl_lock));
	ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
	    lwb->lwb_state == LWB_STATE_FLUSH_DONE);
	ASSERT3P(lwb->lwb_child_zio, ==, NULL);
	ASSERT3P(lwb->lwb_write_zio, ==, NULL);
	ASSERT3P(lwb->lwb_root_zio, ==, NULL);
	ASSERT3U(lwb->lwb_alloc_txg, <=, spa_syncing_txg(zilog->zl_spa));
	ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
	VERIFY(list_is_empty(&lwb->lwb_itxs));
	VERIFY(list_is_empty(&lwb->lwb_waiters));
	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));

	/*
	 * Clear the zilog's field to indicate this lwb is no longer
	 * valid, and prevent use-after-free errors.
	 */
	if (zilog->zl_last_lwb_opened == lwb)
		zilog->zl_last_lwb_opened = NULL;

	kmem_cache_free(zil_lwb_cache, lwb);
}

/*
 * Called when we create in-memory log transactions so that we know
 * to clean up the itxs at the end of spa_sync().
 */
static void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	ASSERT(spa_writeable(zilog->zl_spa));

	if (ds->ds_is_snapshot)
		panic("dirtying snapshot!");

	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, zilog);

		zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
	}
}

/*
 * Determine if the zil is dirty in the specified txg. Callers wanting to
 * ensure that the dirty state does not change must hold the itxg_lock for
 * the specified txg. Holding the lock will ensure that the zil cannot be
 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
 * state.
 */
static boolean_t __maybe_unused
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
		return (B_TRUE);
	return (B_FALSE);
}

/*
 * Determine if the zil is dirty. The zil is considered dirty if it has
 * any pending itx records that have not been cleaned by zil_clean().
 */
static boolean_t
zilog_is_dirty(zilog_t *zilog)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * This is called in zil_commit context (zil_process_commit_list()/
 * zil_create()). It activates the SPA_FEATURE_ZILSAXATTR feature, if it is
 * enabled. Check dsl_dataset_feature_is_active to avoid txg_wait_synced()
 * on every zil_commit.
 */
static void
zil_commit_activate_saxattr_feature(zilog_t *zilog)
{
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;

	if (spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
	    dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL &&
	    !dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
		dsl_dataset_dirty(ds, tx);
		txg = dmu_tx_get_txg(tx);

		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
		    (void *)B_TRUE;
		mutex_exit(&ds->ds_lock);
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}
}

/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;
	boolean_t slog = FALSE;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
		    ZIL_MIN_BLKSZ, &slog);
		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write block (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, 0, &blk, slog, txg, LWB_STATE_NEW);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		/*
		 * If the "zilsaxattr" feature is enabled on the zpool, then
		 * activate it now when we're creating the ZIL chain. We
		 * can't wait with this until we write the first xattr log
		 * record because we need to wait for the feature activation
		 * to sync out.
		 */
		if (spa_feature_is_enabled(zilog->zl_spa,
		    SPA_FEATURE_ZILSAXATTR) && dmu_objset_type(zilog->zl_os) !=
		    DMU_OST_ZVOL) {
			mutex_enter(&ds->ds_lock);
			ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
			    (void *)B_TRUE;
			mutex_exit(&ds->ds_lock);
		}

		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	} else {
		/*
		 * This branch covers the case where we enable the feature on a
		 * zpool that has existing ZIL headers.
		 */
		zil_commit_activate_saxattr_feature(zilog);
	}
	IMPLY(spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
	    dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL,
	    dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR));

	ASSERT(error != 0 || memcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
	IMPLY(error == 0, lwb != NULL);

	return (lwb);
}

/*
 * In one tx, free all log blocks and clear the log header. If keep_first
 * is set, then we're replaying a log with no content. We want to keep the
 * first block, however, so that the first synchronous transaction doesn't
 * require a txg_wait_synced() in zil_create(). We don't need to
 * txg_wait_synced() here either when keep_first is set, because both
 * zil_create() and zil_destroy() will wait for any in-progress destroys
 * to complete.
 * Return B_TRUE if there were any entries to replay.
 */
boolean_t
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return (B_FALSE);

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		VERIFY(!keep_first);
		while ((lwb = list_remove_head(&zilog->zl_lwb_list)) != NULL) {
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			if (!BP_IS_HOLE(&lwb->lwb_blk))
				zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
			zil_free_lwb(zilog, lwb);
		}
	} else if (!keep_first) {
		zil_destroy_sync(zilog, tx);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);

	return (B_TRUE);
}

void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	(void) zil_parse(zilog, zil_free_log_block,
	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}

int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
	dmu_tx_t *tx = txarg;
	zilog_t *zilog;
	uint64_t first_txg;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_own_obj(dp, ds->ds_object,
	    DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
	if (error != 0) {
		/*
		 * EBUSY indicates that the objset is inconsistent, in which
		 * case it can not have a ZIL.
		 */
		if (error != EBUSY) {
			cmn_err(CE_WARN, "can't open objset for %llu, error %u",
			    (unsigned long long)ds->ds_object, error);
		}

		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);
	ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
	first_txg = spa_min_claim_txg(zilog->zl_spa);

	/*
	 * If the spa_log_state is not set to be cleared, check whether
	 * the current uberblock is a checkpoint one and if the current
	 * header has been claimed before moving on.
	 *
	 * If the current uberblock is a checkpointed uberblock then
	 * one of the following scenarios took place:
	 *
	 * 1] We are currently rewinding to the checkpoint of the pool.
	 * 2] We crashed in the middle of a checkpoint rewind but we
	 *    did manage to write the checkpointed uberblock to the
	 *    vdev labels, so when we tried to import the pool again
	 *    the checkpointed uberblock was selected from the import
	 *    procedure.
	 *
	 * In both cases we want to zero out all the ZIL blocks, except
	 * the ones that have been claimed at the time of the checkpoint
	 * (their zh_claim_txg != 0). The reason is that these blocks
	 * may be corrupted since we may have reused their locations on
	 * disk after we took the checkpoint.
	 *
	 * We could try to set spa_log_state to SPA_LOG_CLEAR earlier
	 * when we first figure out whether the current uberblock is
	 * checkpointed or not. Unfortunately, that would discard all
	 * the logs, including the ones that are claimed, and we would
	 * leak space.
	 */
	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
	    (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
	    zh->zh_claim_txg == 0)) {
		if (!BP_IS_HOLE(&zh->zh_log)) {
			(void) zil_parse(zilog, zil_clear_log_block,
			    zil_noop_log_record, tx, first_txg, B_FALSE);
		}
		BP_ZERO(&zh->zh_log);
		if (os->os_encrypted)
			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_disown(os, B_FALSE, FTAG);
		return (0);
	}

	/*
	 * If we are not rewinding and opening the pool normally, then
	 * the min_claim_txg should be equal to the first txg of the pool.
	 */
	ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg, B_FALSE);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		if (os->os_encrypted)
			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_disown(os, B_FALSE, FTAG);
	return (0);
}

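/*
 * Editorial note: to the best of our understanding, zil_claim() above and
 * zil_check_log_chain() below are invoked per-dataset during pool import
 * (via dmu_objset_find_dp() in spa_load()), conceptually:
 *
 *	(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
 *	    zil_claim, tx, DS_FIND_CHILDREN);
 *
 * Neither replays records; they only claim or verify the log chain so
 * that replay can happen safely when the dataset is first used.
 */
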
/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
	(void) dp;
	zilog_t *zilog;
	objset_t *os;
	blkptr_t *bp;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_from_ds(ds, &os);
	if (error != 0) {
		cmn_err(CE_WARN, "can't open objset %llu, error %d",
		    (unsigned long long)ds->ds_object, error);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	bp = (blkptr_t *)&zilog->zl_header->zh_log;

	if (!BP_IS_HOLE(bp)) {
		vdev_t *vd;
		boolean_t valid = B_TRUE;

		/*
		 * Check the first block and determine if it's on a log device
		 * which may have been removed or faulted prior to loading this
		 * pool. If so, there's no point in checking the rest of the
		 * log as its content should have already been synced to the
		 * pool.
		 */
		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
		if (vd->vdev_islog && vdev_is_dead(vd))
			valid = vdev_log_state_valid(vd);
		spa_config_exit(os->os_spa, SCL_STATE, FTAG);

		if (!valid)
			return (0);

		/*
		 * Check whether the current uberblock is checkpointed (e.g.
		 * we are rewinding) and whether the current header has been
		 * claimed or not. If it hasn't then skip verifying it. We
		 * do this because its ZIL blocks may be part of the pool's
		 * state before the rewind, which is no longer valid.
		 */
		zil_header_t *zh = zil_header_in_syncing_context(zilog);
		if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
		    zh->zh_claim_txg == 0)
			return (0);
	}

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg. See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL :
	    spa_min_claim_txg(os->os_spa), B_FALSE);

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}

/*
 * When an itx is "skipped", this function is used to properly mark the
 * waiter as "done", and signal any thread(s) waiting on it. An itx can
 * be skipped (and not committed to an lwb) for a variety of reasons,
 * one of them being that the itx was committed via spa_sync(), prior to
 * it being committed to an lwb; this can happen if a thread calling
 * zil_commit() is racing with spa_sync().
 */
1288 | static void | |
1289 | zil_commit_waiter_skip(zil_commit_waiter_t *zcw) | |
34dc7c2f | 1290 | { |
1ce23dca PS |
1291 | mutex_enter(&zcw->zcw_lock); |
1292 | ASSERT3B(zcw->zcw_done, ==, B_FALSE); | |
1293 | zcw->zcw_done = B_TRUE; | |
1294 | cv_broadcast(&zcw->zcw_cv); | |
1295 | mutex_exit(&zcw->zcw_lock); | |
1296 | } | |
34dc7c2f | 1297 | |
1ce23dca PS |
1298 | /* |
1299 | * This function is used when the given waiter is to be linked into an | |
1300 | * lwb's "lwb_waiter" list; i.e. when the itx is committed to the lwb. | |
1301 | * At this point, the waiter will no longer be referenced by the itx, | |
1302 | * and instead, will be referenced by the lwb. | |
1303 | */ | |
1304 | static void | |
1305 | zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb) | |
1306 | { | |
2fe61a7e PS |
1307 | /* |
1308 | * The lwb_waiters field of the lwb is protected by the zilog's | |
eda3fcd5 AM |
1309 | * zl_issuer_lock while the lwb is open and zl_lock otherwise. |
1310 | * zl_issuer_lock also protects leaving the open state. | |
1311 | * zcw_lwb setting is protected by zl_issuer_lock and state != | |
1312 | * flush_done, which transition is protected by zl_lock. | |
2fe61a7e | 1313 | */ |
eda3fcd5 AM |
1314 | ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_issuer_lock)); |
1315 | IMPLY(lwb->lwb_state != LWB_STATE_OPENED, | |
1316 | MUTEX_HELD(&lwb->lwb_zilog->zl_lock)); | |
1317 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_NEW); | |
1318 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); | |
2fe61a7e | 1319 | |
1ce23dca | 1320 | ASSERT(!list_link_active(&zcw->zcw_node)); |
1ce23dca | 1321 | list_insert_tail(&lwb->lwb_waiters, zcw); |
eda3fcd5 | 1322 | ASSERT3P(zcw->zcw_lwb, ==, NULL); |
1ce23dca | 1323 | zcw->zcw_lwb = lwb; |
1ce23dca PS |
1324 | } |
1325 | ||
1326 | /* | |
1327 | * This function is used when zio_alloc_zil() fails to allocate a ZIL | |
1328 | * block, and the given waiter must be linked to the "nolwb waiters" | |
1329 | * list inside of zil_process_commit_list(). | |
1330 | */ | |
1331 | static void | |
1332 | zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb) | |
1333 | { | |
1ce23dca | 1334 | ASSERT(!list_link_active(&zcw->zcw_node)); |
1ce23dca | 1335 | list_insert_tail(nolwb, zcw); |
eda3fcd5 | 1336 | ASSERT3P(zcw->zcw_lwb, ==, NULL); |
34dc7c2f BB |
1337 | } |
1338 | ||
1339 | void | |
1ce23dca | 1340 | zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp) |
34dc7c2f | 1341 | { |
1ce23dca | 1342 | avl_tree_t *t = &lwb->lwb_vdev_tree; |
34dc7c2f BB |
1343 | avl_index_t where; |
1344 | zil_vdev_node_t *zv, zvsearch; | |
1345 | int ndvas = BP_GET_NDVAS(bp); | |
1346 | int i; | |
1347 | ||
bbcf18c2 AM |
1348 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE); |
1349 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); | |
1350 | ||
53b1f5ea | 1351 | if (zil_nocacheflush) |
34dc7c2f BB |
1352 | return; |
1353 | ||
1ce23dca | 1354 | mutex_enter(&lwb->lwb_vdev_lock); |
34dc7c2f BB |
1355 | for (i = 0; i < ndvas; i++) { |
1356 | zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]); | |
1357 | if (avl_find(t, &zvsearch, &where) == NULL) { | |
79c76d5b | 1358 | zv = kmem_alloc(sizeof (*zv), KM_SLEEP); |
34dc7c2f BB |
1359 | zv->zv_vdev = zvsearch.zv_vdev; |
1360 | avl_insert(t, zv, where); | |
1361 | } | |
1362 | } | |
1ce23dca | 1363 | mutex_exit(&lwb->lwb_vdev_lock); |
34dc7c2f BB |
1364 | } |
1365 | ||
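/*
 * A descriptive sketch of what follows: hand off the not-yet-flushed
 * vdev set of a fully written lwb to its successor "nlwb", merging
 * duplicate entries, so a single later flush can cover both lwbs.
 */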
900d09b2 PS |
1366 | static void |
1367 | zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb) | |
1368 | { | |
1369 | avl_tree_t *src = &lwb->lwb_vdev_tree; | |
1370 | avl_tree_t *dst = &nlwb->lwb_vdev_tree; | |
1371 | void *cookie = NULL; | |
1372 | zil_vdev_node_t *zv; | |
1373 | ||
1374 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE); | |
1375 | ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE); | |
1376 | ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); | |
1377 | ||
1378 | /* | |
1379 | * At this point in its lifetime, 'lwb' does not need the protection | |
1380 | * of lwb_vdev_lock for its lwb_vdev_tree (it will only be modified | |
1381 | * while holding zilog->zl_lock), as its writes and those of its | |
1382 | * children have all completed. The younger 'nlwb' may be waiting on | |
1383 | * future writes to additional vdevs. | |
1384 | */ | |
1385 | mutex_enter(&nlwb->lwb_vdev_lock); | |
1386 | /* | |
1387 | * Tear down the 'lwb' vdev tree, ensuring that entries which do not | |
1388 | * exist in 'nlwb' are moved to it, freeing any would-be duplicates. | |
1389 | */ | |
1390 | while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) { | |
1391 | avl_index_t where; | |
1392 | ||
1393 | if (avl_find(dst, zv, &where) == NULL) { | |
1394 | avl_insert(dst, zv, where); | |
1395 | } else { | |
1396 | kmem_free(zv, sizeof (*zv)); | |
1397 | } | |
1398 | } | |
1399 | mutex_exit(&nlwb->lwb_vdev_lock); | |
1400 | } | |
1401 | ||
1ce23dca PS |
1402 | void |
1403 | zil_lwb_add_txg(lwb_t *lwb, uint64_t txg) | |
1404 | { | |
1405 | lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg); | |
1406 | } | |
1407 | ||
1408 | /* | |
900d09b2 | 1409 | * This function is called after all vdevs associated with a given lwb |
1ce23dca | 1410 | * write have completed their DKIOCFLUSHWRITECACHE command; or as soon |
900d09b2 PS |
1411 | * as the lwb write completes, if "zil_nocacheflush" is set. Further, |
1412 | * all "previous" lwb's will have completed before this function is | |
1413 | * called; i.e. this function is called for all previous lwbs before | |
1414 | * it's called for "this" lwb (enforced via zio the dependencies | |
1415 | * configured in zil_lwb_set_zio_dependency()). | |
1ce23dca PS |
1416 | * |
1417 | * The intention is for this function to be called as soon as the | |
1418 | * contents of an lwb are considered "stable" on disk, and will survive | |
1419 | * any sudden loss of power. At this point, any threads waiting for the | |
1420 | * lwb to reach this state are signalled, and the "waiter" structures | |
1421 | * are marked "done". | |
1422 | */ | |
572e2857 | 1423 | static void |
1ce23dca | 1424 | zil_lwb_flush_vdevs_done(zio_t *zio) |
34dc7c2f | 1425 | { |
1ce23dca PS |
1426 | lwb_t *lwb = zio->io_private; |
1427 | zilog_t *zilog = lwb->lwb_zilog; | |
1ce23dca PS |
1428 | zil_commit_waiter_t *zcw; |
1429 | itx_t *itx; | |
1430 | ||
a604d324 GW |
1431 | spa_config_exit(zilog->zl_spa, SCL_STATE, lwb); |
1432 | ||
895e0313 | 1433 | hrtime_t t = gethrtime() - lwb->lwb_issued_timestamp; |
34dc7c2f | 1434 | |
1ce23dca | 1435 | mutex_enter(&zilog->zl_lock); |
34dc7c2f | 1436 | |
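/*
 * A sketch of the smoothing below: zl_last_lwb_latency is an
 * exponential moving average with weight 1/8, i.e.
 * new = old * 7/8 + sample / 8. With hypothetical numbers, an
 * estimate of 800us and a sample of 1600us yield
 * (800 * 7 + 1600) / 8 = 900us, so a single slow lwb only nudges
 * the estimate instead of replacing it.
 */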
895e0313 | 1437 | zilog->zl_last_lwb_latency = (zilog->zl_last_lwb_latency * 7 + t) / 8; |
34dc7c2f | 1438 | |
1ce23dca | 1439 | lwb->lwb_root_zio = NULL; |
900d09b2 | 1440 | |
b1b99e10 AM |
1441 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE); |
1442 | lwb->lwb_state = LWB_STATE_FLUSH_DONE; | |
1443 | ||
1ce23dca PS |
1444 | if (zilog->zl_last_lwb_opened == lwb) { |
1445 | /* | |
1446 | * Remember the highest committed log sequence number | |
1447 | * for ztest. We only update this value when all the log | |
1448 | * writes succeeded, because ztest wants to ASSERT that | |
1449 | * it got the whole log chain. | |
1450 | */ | |
1451 | zilog->zl_commit_lr_seq = zilog->zl_lr_seq; | |
1452 | } | |
1453 | ||
b1b99e10 | 1454 | while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL) |
1ce23dca | 1455 | zil_itx_destroy(itx); |
1ce23dca | 1456 | |
b1b99e10 | 1457 | while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) { |
1ce23dca PS |
1458 | mutex_enter(&zcw->zcw_lock); |
1459 | ||
b1b99e10 | 1460 | ASSERT3P(zcw->zcw_lwb, ==, lwb); |
1ce23dca | 1461 | zcw->zcw_lwb = NULL; |
f82f0279 AK |
1462 | /* |
1463 | * We expect any ZIO errors from child ZIOs to have been | |
1464 | * propagated "up" to this specific LWB's root ZIO, in | |
1465 | * order for this error handling to work correctly. This | |
1466 | * includes ZIO errors from either this LWB's write or | |
1467 | * flush, as well as any errors from other dependent LWBs | |
1468 | * (e.g. a root LWB ZIO that might be a child of this LWB). | |
1469 | * | |
1470 | * With that said, it's important to note that LWB flush | |
1471 | * errors are not propagated up to the LWB root ZIO. | |
1472 | * This is incorrect behavior, and results in VDEV flush | |
1473 | * errors not being handled correctly here. See the | |
1474 | * comment above the call to "zio_flush" for details. | |
1475 | */ | |
1ce23dca PS |
1476 | |
1477 | zcw->zcw_zio_error = zio->io_error; | |
1478 | ||
1479 | ASSERT3B(zcw->zcw_done, ==, B_FALSE); | |
1480 | zcw->zcw_done = B_TRUE; | |
1481 | cv_broadcast(&zcw->zcw_cv); | |
1482 | ||
1483 | mutex_exit(&zcw->zcw_lock); | |
34dc7c2f | 1484 | } |
b1b99e10 AM |
1485 | |
1486 | uint64_t txg = lwb->lwb_issued_txg; | |
1487 | ||
1488 | /* Once we drop the lock, lwb may be freed by zil_sync(). */ | |
1489 | mutex_exit(&zilog->zl_lock); | |
34dc7c2f | 1490 | |
152d6fda | 1491 | mutex_enter(&zilog->zl_lwb_io_lock); |
152d6fda KJ |
1492 | ASSERT3U(zilog->zl_lwb_inflight[txg & TXG_MASK], >, 0); |
1493 | zilog->zl_lwb_inflight[txg & TXG_MASK]--; | |
1494 | if (zilog->zl_lwb_inflight[txg & TXG_MASK] == 0) | |
1495 | cv_broadcast(&zilog->zl_lwb_io_cv); | |
1496 | mutex_exit(&zilog->zl_lwb_io_lock); | |
1497 | } | |
1498 | ||
1499 | /* | |
1500 | * Wait for the completion of all issued writes/flushes for the given txg. | |
1501 | * It guarantees that zil_lwb_flush_vdevs_done() has been called and returned. | |
1502 | */ | |
1503 | static void | |
1504 | zil_lwb_flush_wait_all(zilog_t *zilog, uint64_t txg) | |
1505 | { | |
1506 | ASSERT3U(txg, ==, spa_syncing_txg(zilog->zl_spa)); | |
1507 | ||
1508 | mutex_enter(&zilog->zl_lwb_io_lock); | |
1509 | while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0) | |
1510 | cv_wait(&zilog->zl_lwb_io_cv, &zilog->zl_lwb_io_lock); | |
1511 | mutex_exit(&zilog->zl_lwb_io_lock); | |
1512 | ||
1513 | #ifdef ZFS_DEBUG | |
1514 | mutex_enter(&zilog->zl_lock); | |
1515 | mutex_enter(&zilog->zl_lwb_io_lock); | |
1516 | lwb_t *lwb = list_head(&zilog->zl_lwb_list); | |
eda3fcd5 | 1517 | while (lwb != NULL) { |
152d6fda KJ |
1518 | if (lwb->lwb_issued_txg <= txg) { |
1519 | ASSERT(lwb->lwb_state != LWB_STATE_ISSUED); | |
1520 | ASSERT(lwb->lwb_state != LWB_STATE_WRITE_DONE); | |
1521 | IMPLY(lwb->lwb_issued_txg > 0, | |
1522 | lwb->lwb_state == LWB_STATE_FLUSH_DONE); | |
1523 | } | |
7381ddf1 AM |
1524 | IMPLY(lwb->lwb_state == LWB_STATE_WRITE_DONE || |
1525 | lwb->lwb_state == LWB_STATE_FLUSH_DONE, | |
152d6fda KJ |
1526 | lwb->lwb_buf == NULL); |
1527 | lwb = list_next(&zilog->zl_lwb_list, lwb); | |
1528 | } | |
1529 | mutex_exit(&zilog->zl_lwb_io_lock); | |
1530 | mutex_exit(&zilog->zl_lock); | |
1531 | #endif | |
34dc7c2f BB |
1532 | } |
1533 | ||
1534 | /* | |
900d09b2 PS |
1535 | * This is called when an lwb's write zio completes. The callback's |
1536 | * purpose is to issue the DKIOCFLUSHWRITECACHE commands for the vdevs | |
1537 | * in the lwb's lwb_vdev_tree. The tree will contain the vdevs involved | |
1538 | * in writing out this specific lwb's data, and in the case that cache | |
1539 | * flushes have been deferred, vdevs involved in writing the data for | |
1540 | * previous lwbs. The writes corresponding to all the vdevs in the | |
1541 | * lwb_vdev_tree will have completed by the time this is called, due to | |
1542 | * the zio dependencies configured in zil_lwb_set_zio_dependency(), | |
1543 | * which takes deferred flushes into account. The lwb will be "done" | |
1544 | * once zil_lwb_flush_vdevs_done() is called, which occurs in the zio | |
1545 | * completion callback for the lwb's root zio. | |
34dc7c2f BB |
1546 | */ |
1547 | static void | |
1548 | zil_lwb_write_done(zio_t *zio) | |
1549 | { | |
1550 | lwb_t *lwb = zio->io_private; | |
1ce23dca | 1551 | spa_t *spa = zio->io_spa; |
34dc7c2f | 1552 | zilog_t *zilog = lwb->lwb_zilog; |
1ce23dca PS |
1553 | avl_tree_t *t = &lwb->lwb_vdev_tree; |
1554 | void *cookie = NULL; | |
1555 | zil_vdev_node_t *zv; | |
900d09b2 | 1556 | lwb_t *nlwb; |
1ce23dca | 1557 | |
a604d324 GW |
1558 | ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0); |
1559 | ||
e2af2acc | 1560 | abd_free(zio->io_abd); |
7381ddf1 AM |
1561 | zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); |
1562 | lwb->lwb_buf = NULL; | |
1ce23dca | 1563 | |
34dc7c2f | 1564 | mutex_enter(&zilog->zl_lock); |
900d09b2 PS |
1565 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED); |
1566 | lwb->lwb_state = LWB_STATE_WRITE_DONE; | |
eda3fcd5 | 1567 | lwb->lwb_child_zio = NULL; |
1ce23dca | 1568 | lwb->lwb_write_zio = NULL; |
90149552 AM |
1569 | |
1570 | /* | |
1571 | * If nlwb is not yet issued, zil_lwb_set_zio_dependency() is not | |
1572 | * called for it yet, and when it is, it won't be able to make | |
1573 | * its write ZIO a parent of this ZIO. In that case we cannot defer | |
1574 | * our flushes, or there may be a race between the done callbacks below. | |
1575 | */ | |
900d09b2 | 1576 | nlwb = list_next(&zilog->zl_lwb_list, lwb); |
90149552 AM |
1577 | if (nlwb && nlwb->lwb_state != LWB_STATE_ISSUED) |
1578 | nlwb = NULL; | |
428870ff | 1579 | mutex_exit(&zilog->zl_lock); |
9babb374 | 1580 | |
1ce23dca PS |
1581 | if (avl_numnodes(t) == 0) |
1582 | return; | |
1583 | ||
9babb374 | 1584 | /* |
1ce23dca PS |
1585 | * If there was an IO error, we're not going to call zio_flush() |
1586 | * on these vdevs, so we simply empty the tree and free the | |
1587 | * nodes. We avoid calling zio_flush() since there isn't any | |
1588 | * good reason for doing so, after the lwb block failed to be | |
1589 | * written out. | |
f82f0279 AK |
1590 | * |
1591 | * Additionally, we don't perform any further error handling at | |
1592 | * this point (e.g. setting "zcw_zio_error" appropriately), as | |
1593 | * we expect that to occur in "zil_lwb_flush_vdevs_done" (thus, | |
1594 | * we expect any error seen here, to have been propagated to | |
1595 | * that function). | |
9babb374 | 1596 | */ |
1ce23dca PS |
1597 | if (zio->io_error != 0) { |
1598 | while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) | |
1599 | kmem_free(zv, sizeof (*zv)); | |
1600 | return; | |
1601 | } | |
1602 | ||
900d09b2 PS |
1603 | /* |
1604 | * If this lwb does not have any threads waiting for it to | |
1605 | * complete, we want to defer issuing the DKIOCFLUSHWRITECACHE | |
1606 | * command to the vdevs written to by "this" lwb, and instead | |
1607 | * rely on the "next" lwb to handle the DKIOCFLUSHWRITECACHE | |
1608 | * command for those vdevs. Thus, we merge the vdev tree of | |
1609 | * "this" lwb with the vdev tree of the "next" lwb in the list, | |
1610 | * and assume the "next" lwb will handle flushing the vdevs (or | |
1611 | * deferring the flush(s) again). | |
1612 | * | |
1613 | * This is a useful performance optimization, especially for | |
1614 | * workloads with lots of async write activity and few sync | |
1615 | * write and/or fsync activity, as it has the potential to | |
1616 | * coalesce multiple flush commands to a vdev into one. | |
1617 | */ | |
895e0313 | 1618 | if (list_is_empty(&lwb->lwb_waiters) && nlwb != NULL) { |
900d09b2 PS |
1619 | zil_lwb_flush_defer(lwb, nlwb); |
1620 | ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); | |
1621 | return; | |
1622 | } | |
1623 | ||
1ce23dca PS |
1624 | while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) { |
1625 | vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev); | |
5a3bffab | 1626 | if (vd != NULL) { |
f82f0279 AK |
1627 | /* |
1628 | * The "ZIO_FLAG_DONT_PROPAGATE" is currently | |
1629 | * always used within "zio_flush". This means, | |
1630 | * any errors when flushing the vdev(s), will | |
1631 | * (unfortunately) not be handled correctly, | |
1632 | * since these "zio_flush" errors will not be | |
1633 | * propagated up to "zil_lwb_flush_vdevs_done". | |
1634 | */ | |
1ce23dca | 1635 | zio_flush(lwb->lwb_root_zio, vd); |
f82f0279 | 1636 | } |
1ce23dca PS |
1637 | kmem_free(zv, sizeof (*zv)); |
1638 | } | |
34dc7c2f BB |
1639 | } |
1640 | ||
eda3fcd5 AM |
1641 | /* |
1642 | * Build the zio dependency chain, which is used to preserve the ordering of | |
1643 | * lwb completions that is required by the semantics of the ZIL. Each new lwb | |
1644 | * zio becomes a parent of the previous lwb zio, such that the new lwb's zio | |
1645 | * cannot complete until the previous lwb's zio completes. | |
1646 | * | |
1647 | * This is required by the semantics of zil_commit(): the commit waiters | |
1648 | * attached to the lwbs will be woken in the lwb zio's completion callback, | |
1649 | * so this zio dependency graph ensures the waiters are woken in the correct | |
1650 | * order (the same order the lwbs were created). | |
1651 | */ | |
900d09b2 PS |
1652 | static void |
1653 | zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb) | |
1654 | { | |
900d09b2 PS |
1655 | ASSERT(MUTEX_HELD(&zilog->zl_lock)); |
1656 | ||
eda3fcd5 AM |
1657 | lwb_t *prev_lwb = list_prev(&zilog->zl_lwb_list, lwb); |
1658 | if (prev_lwb == NULL || | |
1659 | prev_lwb->lwb_state == LWB_STATE_FLUSH_DONE) | |
1660 | return; | |
1661 | ||
900d09b2 | 1662 | /* |
eda3fcd5 AM |
1663 | * If the previous lwb's write hasn't already completed, we also want |
1664 | * to order the completion of the lwb write zios (the completion of | |
1665 | * the lwb root zios is ordered below). This is required because of | |
1666 | * how we can defer the DKIOCFLUSHWRITECACHE commands for each lwb. | |
900d09b2 | 1667 | * |
eda3fcd5 AM |
1668 | * When the DKIOCFLUSHWRITECACHE commands are deferred, the previous |
1669 | * lwb will rely on this lwb to flush the vdevs written to by that | |
1670 | * previous lwb. Thus, we need to ensure this lwb doesn't issue the | |
1671 | * flush until after the previous lwb's write completes. We ensure | |
1672 | * this ordering by setting the zio parent/child relationship here. | |
1673 | * | |
1674 | * Without this relationship on the lwb's write zio, it's possible | |
1675 | * for this lwb's write to complete prior to the previous lwb's write | |
1676 | * completing; and thus, the vdevs for the previous lwb would be | |
1677 | * flushed prior to that lwb's data being written to those vdevs (the | |
1678 | * vdevs are flushed in the lwb write zio's completion handler, | |
1679 | * zil_lwb_write_done()). | |
1680 | */ | |
1681 | if (prev_lwb->lwb_state == LWB_STATE_ISSUED) { | |
1682 | ASSERT3P(prev_lwb->lwb_write_zio, !=, NULL); | |
1683 | zio_add_child(lwb->lwb_write_zio, prev_lwb->lwb_write_zio); | |
1684 | } else { | |
1685 | ASSERT3S(prev_lwb->lwb_state, ==, LWB_STATE_WRITE_DONE); | |
900d09b2 | 1686 | } |
eda3fcd5 AM |
1687 | |
1688 | ASSERT3P(prev_lwb->lwb_root_zio, !=, NULL); | |
1689 | zio_add_child(lwb->lwb_root_zio, prev_lwb->lwb_root_zio); | |
900d09b2 PS |
1690 | } |
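/*
 * Shape of the resulting graph for three lwbs (illustrative):
 *
 *   lwb1 root zio  <--  lwb2 root zio  <--  lwb3 root zio
 *   lwb1 write zio <--  lwb2 write zio <--  lwb3 write zio (while ISSUED)
 *
 * where "A <-- B" means B was made a parent of A, so B cannot complete
 * until A has; hence lwbs complete, and waiters are woken, oldest first.
 */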
1691 | ||
1692 | ||
34dc7c2f | 1693 | /* |
1ce23dca | 1694 | * This function's purpose is to "open" an lwb such that it is ready to |
eda3fcd5 AM |
1695 | * accept new itxs being committed to it. This function is idempotent; if |
1696 | * the passed in lwb has already been opened, it is essentially a no-op. | |
34dc7c2f BB |
1697 | */ |
1698 | static void | |
1ce23dca | 1699 | zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb) |
34dc7c2f | 1700 | { |
1b2b0aca | 1701 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca | 1702 | |
eda3fcd5 AM |
1703 | if (lwb->lwb_state != LWB_STATE_NEW) { |
1704 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); | |
f63811f0 | 1705 | return; |
eda3fcd5 | 1706 | } |
1ce23dca | 1707 | |
b22bab25 | 1708 | mutex_enter(&zilog->zl_lock); |
f63811f0 | 1709 | lwb->lwb_state = LWB_STATE_OPENED; |
f63811f0 | 1710 | zilog->zl_last_lwb_opened = lwb; |
920dd524 | 1711 | mutex_exit(&zilog->zl_lock); |
34dc7c2f BB |
1712 | } |
1713 | ||
b8738257 MA |
1714 | /* |
1715 | * Maximum block size used by the ZIL. This is picked up when the ZIL is | |
1716 | * initialized. Otherwise this should not be used directly; see | |
1717 | * zl_max_block_size instead. | |
1718 | */ | |
fdc2d303 | 1719 | static uint_t zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE; |
b8738257 | 1720 | |
eff77a80 AM |
1721 | /* |
1722 | * Plan splitting of the provided burst size between several blocks. | |
1723 | */ | |
1724 | static uint_t | |
1725 | zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize) | |
1726 | { | |
1727 | uint_t md = zilog->zl_max_block_size - sizeof (zil_chain_t); | |
1728 | ||
1729 | if (size <= md) { | |
1730 | /* | |
1731 | * Small bursts are written as-is in one block. | |
1732 | */ | |
1733 | *minsize = size; | |
1734 | return (size); | |
1735 | } else if (size > 8 * md) { | |
1736 | /* | |
1737 | * Big bursts use maximum blocks. The first block size | |
1738 | * is hard to predict, but it does not really matter. | |
1739 | */ | |
1740 | *minsize = 0; | |
1741 | return (md); | |
1742 | } | |
1743 | ||
1744 | /* | |
1745 | * Medium bursts are divided evenly to better utilize several SLOG | |
1746 | * VDEVs. We predict the first block size assuming the worst case of | |
1747 | * the others being maxed out. Fall back to using maximum blocks if, | |
1748 | * due to large records or wasted space, we cannot predict anything better. | |
1749 | */ | |
1750 | uint_t s = size; | |
1751 | uint_t n = DIV_ROUND_UP(s, md - sizeof (lr_write_t)); | |
1752 | uint_t chunk = DIV_ROUND_UP(s, n); | |
1753 | uint_t waste = zil_max_waste_space(zilog); | |
1754 | waste = MAX(waste, zilog->zl_cur_max); | |
1755 | if (chunk <= md - waste) { | |
1756 | *minsize = MAX(s - (md - waste) * (n - 1), waste); | |
1757 | return (chunk); | |
1758 | } else { | |
1759 | *minsize = 0; | |
1760 | return (md); | |
1761 | } | |
1762 | } | |
1763 | ||
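/*
 * A worked example of the planning above, assuming a hypothetical
 * 128K zl_max_block_size: md is 128K minus the zil_chain_t header.
 * A 40K burst fits in one block and is written as-is. A 2M burst
 * exceeds 8 * md, so maximum-size blocks are used. A 300K burst is
 * "medium": n = DIV_ROUND_UP(300K, ~128K) = 3, so three ~100K chunks
 * are planned, spreading the burst over up to three SLOG vdevs.
 */
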
1764 | /* | |
1765 | * Try to predict the next block size based on previous history. Make the | |
1766 | * prediction sufficient for 7 of 8 previous bursts. Don't try to save if | |
1767 | * the saving is less than 50%; extra writes may cost more, but we don't | |
1768 | * want a single spike to badly affect our predictions. | |
1769 | */ | |
1770 | static uint_t | |
1771 | zil_lwb_predict(zilog_t *zilog) | |
1772 | { | |
1773 | uint_t m, o; | |
1774 | ||
1775 | /* If we are in the middle of a burst, take it into account also. */ | |
1776 | if (zilog->zl_cur_size > 0) { | |
1777 | o = zil_lwb_plan(zilog, zilog->zl_cur_size, &m); | |
1778 | } else { | |
1779 | o = UINT_MAX; | |
1780 | m = 0; | |
1781 | } | |
1782 | ||
1783 | /* Find minimum optimal size. We don't need to go below that. */ | |
1784 | for (int i = 0; i < ZIL_BURSTS; i++) | |
1785 | o = MIN(o, zilog->zl_prev_opt[i]); | |
1786 | ||
1787 | /* Find two biggest minimal first block sizes above the optimal. */ | |
1788 | uint_t m1 = MAX(m, o), m2 = o; | |
1789 | for (int i = 0; i < ZIL_BURSTS; i++) { | |
1790 | m = zilog->zl_prev_min[i]; | |
1791 | if (m >= m1) { | |
1792 | m2 = m1; | |
1793 | m1 = m; | |
1794 | } else if (m > m2) { | |
1795 | m2 = m; | |
1796 | } | |
1797 | } | |
1798 | ||
1799 | /* | |
1800 | * If the second minimum size gives a 50% saving -- use it. It may cost | |
1801 | * us one additional write later, but the space saving is just too big. | |
1802 | */ | |
1803 | return ((m1 < m2 * 2) ? m1 : m2); | |
1804 | } | |
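/*
 * Hypothetical example of the 50% rule above: with the two largest
 * minimal first-block sizes m1 = 96K and m2 = 40K, m1 >= m2 * 2, so
 * m2 is returned -- the >50% space saving is worth a possible extra
 * write. With m1 = 64K and m2 = 40K the saving is under 50%, so m1
 * is returned.
 */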
1805 | ||
34dc7c2f | 1806 | /* |
f63811f0 AM |
1807 | * Close the log block for being issued and allocate the next one. |
1808 | * Has to be called under zl_issuer_lock to chain more lwbs. | |
34dc7c2f BB |
1809 | */ |
1810 | static lwb_t * | |
eda3fcd5 | 1811 | zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state) |
34dc7c2f | 1812 | { |
eff77a80 | 1813 | uint64_t blksz, plan, plan2; |
428870ff | 1814 | |
1b2b0aca | 1815 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca | 1816 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); |
eda3fcd5 | 1817 | lwb->lwb_state = LWB_STATE_CLOSED; |
1ce23dca | 1818 | |
f63811f0 | 1819 | /* |
eda3fcd5 AM |
1820 | * If there was an allocation failure then the returned NULL will trigger | |
1821 | * zil_commit_writer_stall() at the caller. This is inherently racy, | |
1822 | * since allocation may not have happened yet. | |
34dc7c2f | 1823 | */ |
eda3fcd5 AM |
1824 | if (lwb->lwb_error != 0) |
1825 | return (NULL); | |
34dc7c2f BB |
1826 | |
1827 | /* | |
eff77a80 AM |
1828 | * Log blocks are pre-allocated. Here we select the size of the next |
1829 | * block, based on what's left of this burst and the previous history. | |
1830 | * While we try to write only the used part of the block, we can't just | |
1831 | * always allocate the maximum block size because we can exhaust all | |
1832 | * available pool log space, so we try to be reasonable. | |
34dc7c2f | 1833 | */ |
eff77a80 AM |
1834 | if (zilog->zl_cur_left > 0) { |
1835 | /* | |
1836 | * We are in the middle of a burst and know how much is left. | |
1837 | * But if the workload is multi-threaded there may be more soon. | |
1838 | * Try to predict what it can be and plan for the worst case. | |
1839 | */ | |
1840 | uint_t m; | |
1841 | plan = zil_lwb_plan(zilog, zilog->zl_cur_left, &m); | |
1842 | if (zilog->zl_parallel) { | |
1843 | plan2 = zil_lwb_plan(zilog, zilog->zl_cur_left + | |
1844 | zil_lwb_predict(zilog), &m); | |
1845 | if (plan < plan2) | |
1846 | plan = plan2; | |
1847 | } | |
1848 | } else { | |
1849 | /* | |
1850 | * The previous burst is done and we can only predict what | |
1851 | * will come next. | |
1852 | */ | |
1853 | plan = zil_lwb_predict(zilog); | |
1854 | } | |
1855 | blksz = plan + sizeof (zil_chain_t); | |
1856 | blksz = P2ROUNDUP_TYPED(blksz, ZIL_MIN_BLKSZ, uint64_t); | |
1857 | blksz = MIN(blksz, zilog->zl_max_block_size); | |
1858 | DTRACE_PROBE3(zil__block__size, zilog_t *, zilog, uint64_t, blksz, | |
1859 | uint64_t, plan); | |
34dc7c2f | 1860 | |
eff77a80 | 1861 | return (zil_alloc_lwb(zilog, blksz, NULL, 0, 0, state)); |
eda3fcd5 | 1862 | } |
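/*
 * For instance (hypothetical numbers): a plan of 20000 bytes plus the
 * zil_chain_t header rounds up to the next ZIL_MIN_BLKSZ multiple and
 * is then capped at zl_max_block_size before being passed to
 * zil_alloc_lwb() as the size of the next pre-allocated block.
 */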
34dc7c2f | 1863 | |
eda3fcd5 AM |
1864 | /* |
1865 | * Finalize previously closed block and issue the write zio. | |
1866 | */ | |
1867 | static void | |
1868 | zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb) | |
1869 | { | |
1870 | spa_t *spa = zilog->zl_spa; | |
1871 | zil_chain_t *zilc; | |
1872 | boolean_t slog; | |
1873 | zbookmark_phys_t zb; | |
1874 | zio_priority_t prio; | |
1875 | int error; | |
34dc7c2f | 1876 | |
eda3fcd5 | 1877 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED); |
f63811f0 | 1878 | |
eda3fcd5 AM |
1879 | /* Actually fill the lwb with the data. */ |
1880 | for (itx_t *itx = list_head(&lwb->lwb_itxs); itx; | |
1881 | itx = list_next(&lwb->lwb_itxs, itx)) | |
1882 | zil_lwb_commit(zilog, lwb, itx); | |
1883 | lwb->lwb_nused = lwb->lwb_nfilled; | |
2a27fd41 | 1884 | ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax); |
eda3fcd5 AM |
1885 | |
1886 | lwb->lwb_root_zio = zio_root(spa, zil_lwb_flush_vdevs_done, lwb, | |
1887 | ZIO_FLAG_CANFAIL); | |
f63811f0 | 1888 | |
2cb992a9 | 1889 | /* |
eda3fcd5 AM |
1890 | * The lwb is now ready to be issued, but it can be issued only if it | |
1891 | * already has its block pointer allocated or the allocation has failed. | |
1892 | * Otherwise leave it as-is, relying on some other thread to issue it | |
1893 | * after allocating its block pointer by calling zil_lwb_write_issue() | |
1894 | * for the previous lwb(s) in the chain. | |
2cb992a9 | 1895 | */ |
eda3fcd5 AM |
1896 | mutex_enter(&zilog->zl_lock); |
1897 | lwb->lwb_state = LWB_STATE_READY; | |
1898 | if (BP_IS_HOLE(&lwb->lwb_blk) && lwb->lwb_error == 0) { | |
1899 | mutex_exit(&zilog->zl_lock); | |
1900 | return; | |
1901 | } | |
1902 | mutex_exit(&zilog->zl_lock); | |
1903 | ||
1904 | next_lwb: | |
1905 | if (lwb->lwb_slim) | |
1906 | zilc = (zil_chain_t *)lwb->lwb_buf; | |
1907 | else | |
1908 | zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_nmax); | |
1909 | int wsz = lwb->lwb_sz; | |
1910 | if (lwb->lwb_error == 0) { | |
1911 | abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, lwb->lwb_sz); | |
eff77a80 | 1912 | if (!lwb->lwb_slog || zilog->zl_cur_size <= zil_slog_bulk) |
eda3fcd5 AM |
1913 | prio = ZIO_PRIORITY_SYNC_WRITE; |
1914 | else | |
1915 | prio = ZIO_PRIORITY_ASYNC_WRITE; | |
1916 | SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET], | |
1917 | ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, | |
1918 | lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]); | |
1919 | lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, spa, 0, | |
1920 | &lwb->lwb_blk, lwb_abd, lwb->lwb_sz, zil_lwb_write_done, | |
1921 | lwb, prio, ZIO_FLAG_CANFAIL, &zb); | |
1922 | zil_lwb_add_block(lwb, &lwb->lwb_blk); | |
1923 | ||
1924 | if (lwb->lwb_slim) { | |
1925 | /* For Slim ZIL only write what is used. */ | |
1926 | wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, | |
1927 | int); | |
1928 | ASSERT3S(wsz, <=, lwb->lwb_sz); | |
1929 | zio_shrink(lwb->lwb_write_zio, wsz); | |
1930 | wsz = lwb->lwb_write_zio->io_size; | |
2cb992a9 | 1931 | } |
eda3fcd5 AM |
1932 | memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused); |
1933 | zilc->zc_pad = 0; | |
1934 | zilc->zc_nused = lwb->lwb_nused; | |
1935 | zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum; | |
2cb992a9 | 1936 | } else { |
eda3fcd5 AM |
1937 | /* |
1938 | * We can't write the lwb if there was an allocation failure, | |
1939 | * so create a null zio instead just to maintain dependencies. | |
1940 | */ | |
1941 | lwb->lwb_write_zio = zio_null(lwb->lwb_root_zio, spa, NULL, | |
1942 | zil_lwb_write_done, lwb, ZIO_FLAG_CANFAIL); | |
1943 | lwb->lwb_write_zio->io_error = lwb->lwb_error; | |
2cb992a9 | 1944 | } |
eda3fcd5 AM |
1945 | if (lwb->lwb_child_zio) |
1946 | zio_add_child(lwb->lwb_write_zio, lwb->lwb_child_zio); | |
2cb992a9 | 1947 | |
f63811f0 | 1948 | /* |
eda3fcd5 | 1949 | * Open a transaction to allocate the next block pointer. | |
f63811f0 | 1950 | */ |
eda3fcd5 AM |
1951 | dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); |
1952 | VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE)); | |
1953 | dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); | |
1954 | uint64_t txg = dmu_tx_get_txg(tx); | |
f63811f0 | 1955 | |
eda3fcd5 AM |
1956 | /* |
1957 | * Allocate the next block pointer unless we are already in error. | |
1958 | */ | |
1959 | lwb_t *nlwb = list_next(&zilog->zl_lwb_list, lwb); | |
1960 | blkptr_t *bp = &zilc->zc_next_blk; | |
1961 | BP_ZERO(bp); | |
1962 | error = lwb->lwb_error; | |
1963 | if (error == 0) { | |
1964 | error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, nlwb->lwb_sz, | |
1965 | &slog); | |
1966 | } | |
1967 | if (error == 0) { | |
493fcce9 | 1968 | ASSERT3U(BP_GET_LOGICAL_BIRTH(bp), ==, txg); |
eda3fcd5 AM |
1969 | BP_SET_CHECKSUM(bp, nlwb->lwb_slim ? ZIO_CHECKSUM_ZILOG2 : |
1970 | ZIO_CHECKSUM_ZILOG); | |
1971 | bp->blk_cksum = lwb->lwb_blk.blk_cksum; | |
1972 | bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++; | |
f63811f0 AM |
1973 | } |
1974 | ||
eda3fcd5 AM |
1975 | /* |
1976 | * Reduce TXG open time by incrementing the inflight counter and committing | |
1977 | * the transaction. zil_sync() will wait for the counter to return to zero. | |
1978 | */ | |
1979 | mutex_enter(&zilog->zl_lwb_io_lock); | |
1980 | lwb->lwb_issued_txg = txg; | |
1981 | zilog->zl_lwb_inflight[txg & TXG_MASK]++; | |
1982 | zilog->zl_lwb_max_issued_txg = MAX(txg, zilog->zl_lwb_max_issued_txg); | |
1983 | mutex_exit(&zilog->zl_lwb_io_lock); | |
1984 | dmu_tx_commit(tx); | |
34dc7c2f | 1985 | |
eda3fcd5 | 1986 | spa_config_enter(spa, SCL_STATE, lwb, RW_READER); |
34dc7c2f BB |
1987 | |
1988 | /* | |
eda3fcd5 AM |
1989 | * We've completed all potentially blocking operations. Update the |
1990 | * nlwb and allow it to proceed without possible lock order reversals. | |
34dc7c2f | 1991 | */ |
eda3fcd5 AM |
1992 | mutex_enter(&zilog->zl_lock); |
1993 | zil_lwb_set_zio_dependency(zilog, lwb); | |
1994 | lwb->lwb_state = LWB_STATE_ISSUED; | |
1995 | ||
1996 | if (nlwb) { | |
1997 | nlwb->lwb_blk = *bp; | |
1998 | nlwb->lwb_error = error; | |
1999 | nlwb->lwb_slog = slog; | |
2000 | nlwb->lwb_alloc_txg = txg; | |
2001 | if (nlwb->lwb_state != LWB_STATE_READY) | |
2002 | nlwb = NULL; | |
2003 | } | |
2004 | mutex_exit(&zilog->zl_lock); | |
34dc7c2f | 2005 | |
f63811f0 AM |
2006 | if (lwb->lwb_slog) { |
2007 | ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count); | |
2008 | ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes, | |
2009 | lwb->lwb_nused); | |
b6fbe61f AM |
2010 | ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_write, |
2011 | wsz); | |
2012 | ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_alloc, | |
2013 | BP_GET_LSIZE(&lwb->lwb_blk)); | |
f63811f0 AM |
2014 | } else { |
2015 | ZIL_STAT_BUMP(zilog, zil_itx_metaslab_normal_count); | |
2016 | ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_bytes, | |
2017 | lwb->lwb_nused); | |
b6fbe61f AM |
2018 | ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_write, |
2019 | wsz); | |
2020 | ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_alloc, | |
2021 | BP_GET_LSIZE(&lwb->lwb_blk)); | |
f63811f0 | 2022 | } |
1ce23dca | 2023 | lwb->lwb_issued_timestamp = gethrtime(); |
eda3fcd5 AM |
2024 | if (lwb->lwb_child_zio) |
2025 | zio_nowait(lwb->lwb_child_zio); | |
9da6b604 AM |
2026 | zio_nowait(lwb->lwb_write_zio); |
2027 | zio_nowait(lwb->lwb_root_zio); | |
eda3fcd5 AM |
2028 | |
2029 | /* | |
2030 | * If nlwb was ready when we gave it the block pointer, | |
2031 | * it is on us to issue it and possibly following ones. | |
2032 | */ | |
2033 | lwb = nlwb; | |
2034 | if (lwb) | |
2035 | goto next_lwb; | |
34dc7c2f BB |
2036 | } |
2037 | ||
b8738257 | 2038 | /* |
67a1b037 | 2039 | * Maximum amount of data that can be put into a single log block. |
b8738257 MA |
2040 | */ |
2041 | uint64_t | |
67a1b037 | 2042 | zil_max_log_data(zilog_t *zilog, size_t hdrsize) |
b8738257 | 2043 | { |
67a1b037 | 2044 | return (zilog->zl_max_block_size - sizeof (zil_chain_t) - hdrsize); |
b8738257 MA |
2045 | } |
2046 | ||
2047 | /* | |
2048 | * Maximum amount of log space we agree to waste in order to reduce the | |
66b81b34 | 2049 | * number of WR_NEED_COPY chunks and thus the zl_get_data() overhead (~6%). |
b8738257 MA |
2050 | */ |
2051 | static inline uint64_t | |
2052 | zil_max_waste_space(zilog_t *zilog) | |
2053 | { | |
66b81b34 | 2054 | return (zil_max_log_data(zilog, sizeof (lr_write_t)) / 16); |
b8738257 MA |
2055 | } |
2056 | ||
2057 | /* | |
2058 | * Maximum amount of write data for WR_COPIED. For correctness, consumers | |
2059 | * must fall back to WR_NEED_COPY if we can't fit the entire record into one | |
2060 | * maximum sized log block, because each WR_COPIED record must fit in a | |
66b81b34 AM |
2061 | * single log block. Below that, it is a tradeoff between an additional memory | |
2062 | * copy plus possibly worse log space efficiency, and an additional range lock/unlock. | |
b8738257 | 2063 | */ |
66b81b34 AM |
2064 | static uint_t zil_maxcopied = 7680; |
2065 | ||
b8738257 MA |
2066 | uint64_t |
2067 | zil_max_copied_data(zilog_t *zilog) | |
2068 | { | |
66b81b34 AM |
2069 | uint64_t max_data = zil_max_log_data(zilog, sizeof (lr_write_t)); |
2070 | return (MIN(max_data, zil_maxcopied)); | |
b8738257 MA |
2071 | } |
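/*
 * Resulting limits, assuming a hypothetical 128K zl_max_block_size:
 * zil_max_log_data() leaves 128K minus the zil_chain_t and record
 * headers, zil_max_waste_space() tolerates ~1/16 of that (~8K) as
 * waste, and WR_COPIED payloads are further capped at zil_maxcopied
 * (7680 bytes), beyond which WR_NEED_COPY or WR_INDIRECT is cheaper.
 */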
2072 | ||
eff77a80 AM |
2073 | static uint64_t |
2074 | zil_itx_record_size(itx_t *itx) | |
2075 | { | |
2076 | lr_t *lr = &itx->itx_lr; | |
2077 | ||
2078 | if (lr->lrc_txtype == TX_COMMIT) | |
2079 | return (0); | |
2080 | ASSERT3U(lr->lrc_reclen, >=, sizeof (lr_t)); | |
2081 | return (lr->lrc_reclen); | |
2082 | } | |
2083 | ||
2084 | static uint64_t | |
2085 | zil_itx_data_size(itx_t *itx) | |
2086 | { | |
2087 | lr_t *lr = &itx->itx_lr; | |
2088 | lr_write_t *lrw = (lr_write_t *)lr; | |
2089 | ||
2090 | if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) { | |
2091 | ASSERT3U(lr->lrc_reclen, ==, sizeof (lr_write_t)); | |
2092 | return (P2ROUNDUP_TYPED(lrw->lr_length, sizeof (uint64_t), | |
2093 | uint64_t)); | |
2094 | } | |
2095 | return (0); | |
2096 | } | |
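/*
 * E.g. a hypothetical WR_NEED_COPY write of 4097 bytes contributes a
 * P2ROUNDUP'd 4104-byte payload here, while WR_COPIED and WR_INDIRECT
 * writes carry no separate payload and contribute 0.
 */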
2097 | ||
2098 | static uint64_t | |
2099 | zil_itx_full_size(itx_t *itx) | |
2100 | { | |
2101 | lr_t *lr = &itx->itx_lr; | |
2102 | ||
2103 | if (lr->lrc_txtype == TX_COMMIT) | |
2104 | return (0); | |
2105 | ASSERT3U(lr->lrc_reclen, >=, sizeof (lr_t)); | |
2106 | return (lr->lrc_reclen + zil_itx_data_size(itx)); | |
2107 | } | |
2108 | ||
f63811f0 AM |
2109 | /* |
2110 | * Estimate space needed in the lwb for the itx. Allocate more lwbs or | |
2111 | * split the itx as needed, but don't touch the actual transaction data. | |
2112 | * Has to be called under zl_issuer_lock to call zil_lwb_write_close() | |
2113 | * to chain more lwbs. | |
2114 | */ | |
34dc7c2f | 2115 | static lwb_t * |
f63811f0 | 2116 | zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs) |
34dc7c2f | 2117 | { |
f63811f0 AM |
2118 | itx_t *citx; |
2119 | lr_t *lr, *clr; | |
2120 | lr_write_t *lrw; | |
2121 | uint64_t dlen, dnow, lwb_sp, reclen, max_log_data; | |
34dc7c2f | 2122 | |
1b2b0aca | 2123 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca PS |
2124 | ASSERT3P(lwb, !=, NULL); |
2125 | ASSERT3P(lwb->lwb_buf, !=, NULL); | |
2126 | ||
2127 | zil_lwb_write_open(zilog, lwb); | |
428870ff | 2128 | |
f63811f0 AM |
2129 | lr = &itx->itx_lr; |
2130 | lrw = (lr_write_t *)lr; | |
1ce23dca PS |
2131 | |
2132 | /* | |
2133 | * A commit itx doesn't represent any on-disk state; instead | |
2134 | * it's simply used as a placeholder on the commit list, and | |
2135 | * provides a mechanism for attaching a "commit waiter" onto the | |
2136 | * correct lwb (such that the waiter can be signalled upon | |
2137 | * completion of that lwb). Thus, we don't process this itx's | |
2138 | * log record if it's a commit itx (these itx's don't have log | |
2139 | * records), and instead link the itx's waiter onto the lwb's | |
2140 | * list of waiters. | |
2141 | * | |
2142 | * For more details, see the comment above zil_commit(). | |
2143 | */ | |
f63811f0 | 2144 | if (lr->lrc_txtype == TX_COMMIT) { |
1ce23dca | 2145 | zil_commit_waiter_link_lwb(itx->itx_private, lwb); |
f63811f0 | 2146 | list_insert_tail(&lwb->lwb_itxs, itx); |
1ce23dca PS |
2147 | return (lwb); |
2148 | } | |
34dc7c2f | 2149 | |
2a27fd41 | 2150 | reclen = lr->lrc_reclen; |
eff77a80 | 2151 | ASSERT3U(reclen, >=, sizeof (lr_t)); |
2a27fd41 | 2152 | ASSERT3U(reclen, <=, zil_max_log_data(zilog, 0)); |
eff77a80 | 2153 | dlen = zil_itx_data_size(itx); |
34dc7c2f | 2154 | |
1b7c1e5c | 2155 | cont: |
34dc7c2f BB |
2156 | /* |
2157 | * If this record won't fit in the current log block, start a new one. | |
1b7c1e5c | 2158 | * For WR_NEED_COPY optimize layout for minimal number of chunks. |
34dc7c2f | 2159 | */ |
eda3fcd5 | 2160 | lwb_sp = lwb->lwb_nmax - lwb->lwb_nused; |
67a1b037 | 2161 | max_log_data = zil_max_log_data(zilog, sizeof (lr_write_t)); |
1b7c1e5c | 2162 | if (reclen > lwb_sp || (reclen + dlen > lwb_sp && |
b8738257 MA |
2163 | lwb_sp < zil_max_waste_space(zilog) && |
2164 | (dlen % max_log_data == 0 || | |
2165 | lwb_sp < reclen + dlen % max_log_data))) { | |
eda3fcd5 AM |
2166 | list_insert_tail(ilwbs, lwb); |
2167 | lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_OPENED); | |
34dc7c2f BB |
2168 | if (lwb == NULL) |
2169 | return (NULL); | |
eda3fcd5 | 2170 | lwb_sp = lwb->lwb_nmax - lwb->lwb_nused; |
34dc7c2f BB |
2171 | } |
2172 | ||
2a27fd41 AM |
2173 | /* |
2174 | * There must be enough space in the log block to hold reclen. | |
2175 | * For WR_COPIED, we need to fit the whole record in one block, | |
2176 | * and reclen is the write record header size + the data size. | |
2177 | * For WR_NEED_COPY, we can create multiple records, splitting | |
2178 | * the data into multiple blocks, so we only need to fit one | |
2179 | * word of data per block; in this case reclen is just the header | |
2180 | * size (no data). | |
2181 | */ | |
2182 | ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp); | |
2183 | ||
1b7c1e5c | 2184 | dnow = MIN(dlen, lwb_sp - reclen); |
f63811f0 AM |
2185 | if (dlen > dnow) { |
2186 | ASSERT3U(lr->lrc_txtype, ==, TX_WRITE); | |
2187 | ASSERT3U(itx->itx_wr_state, ==, WR_NEED_COPY); | |
2188 | citx = zil_itx_clone(itx); | |
2189 | clr = &citx->itx_lr; | |
2190 | lr_write_t *clrw = (lr_write_t *)clr; | |
2191 | clrw->lr_length = dnow; | |
2192 | lrw->lr_offset += dnow; | |
2193 | lrw->lr_length -= dnow; | |
eff77a80 | 2194 | zilog->zl_cur_left -= dnow; |
f63811f0 AM |
2195 | } else { |
2196 | citx = itx; | |
2197 | clr = lr; | |
2198 | } | |
2199 | ||
2200 | /* | |
2201 | * We're actually making an entry, so update lrc_seq to be the | |
2202 | * log record sequence number. Note that this is generally not | |
2203 | * equal to the itx sequence number because not all transactions | |
2204 | * are synchronous, and sometimes spa_sync() gets there first. | |
2205 | */ | |
2206 | clr->lrc_seq = ++zilog->zl_lr_seq; | |
2207 | ||
2208 | lwb->lwb_nused += reclen + dnow; | |
eda3fcd5 | 2209 | ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_nmax); |
f63811f0 AM |
2210 | ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); |
2211 | ||
2212 | zil_lwb_add_txg(lwb, lr->lrc_txg); | |
2213 | list_insert_tail(&lwb->lwb_itxs, citx); | |
2214 | ||
2215 | dlen -= dnow; | |
eff77a80 | 2216 | if (dlen > 0) |
f63811f0 | 2217 | goto cont; |
f63811f0 | 2218 | |
eda3fcd5 AM |
2219 | if (lr->lrc_txtype == TX_WRITE && |
2220 | lr->lrc_txg > spa_freeze_txg(zilog->zl_spa)) | |
2221 | txg_wait_synced(zilog->zl_dmu_pool, lr->lrc_txg); | |
f63811f0 AM |
2222 | |
2223 | return (lwb); | |
2224 | } | |
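/*
 * Splitting sketch (hypothetical): a 1M WR_NEED_COPY write with ~128K
 * blocks is peeled off in lwb_sp-sized slices -- each iteration clones
 * the itx, sets the clone's lr_length to what fits (dnow), advances
 * lr_offset/lr_length in the original, and closes/opens lwbs as needed
 * -- so each slice lands whole in one block.
 */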
2225 | ||
2226 | /* | |
2227 | * Fill the actual transaction data into the lwb, following zil_lwb_assign(). | |
2228 | * Does not require locking. | |
2229 | */ | |
2230 | static void | |
2231 | zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx) | |
2232 | { | |
2233 | lr_t *lr, *lrb; | |
2234 | lr_write_t *lrw, *lrwb; | |
2235 | char *lr_buf; | |
2236 | uint64_t dlen, reclen; | |
2237 | ||
2238 | lr = &itx->itx_lr; | |
2239 | lrw = (lr_write_t *)lr; | |
2240 | ||
2241 | if (lr->lrc_txtype == TX_COMMIT) | |
2242 | return; | |
2243 | ||
f63811f0 | 2244 | reclen = lr->lrc_reclen; |
eff77a80 | 2245 | dlen = zil_itx_data_size(itx); |
f63811f0 AM |
2246 | ASSERT3U(reclen + dlen, <=, lwb->lwb_nused - lwb->lwb_nfilled); |
2247 | ||
2248 | lr_buf = lwb->lwb_buf + lwb->lwb_nfilled; | |
2249 | memcpy(lr_buf, lr, reclen); | |
2250 | lrb = (lr_t *)lr_buf; /* Like lr, but inside lwb. */ | |
2251 | lrwb = (lr_write_t *)lrb; /* Like lrw, but inside lwb. */ | |
34dc7c2f | 2252 | |
fb087146 | 2253 | ZIL_STAT_BUMP(zilog, zil_itx_count); |
b6ad9671 | 2254 | |
34dc7c2f BB |
2255 | /* |
2256 | * If it's a write, fetch the data or get its blkptr as appropriate. | |
2257 | */ | |
f63811f0 | 2258 | if (lr->lrc_txtype == TX_WRITE) { |
b6ad9671 | 2259 | if (itx->itx_wr_state == WR_COPIED) { |
fb087146 AH |
2260 | ZIL_STAT_BUMP(zilog, zil_itx_copied_count); |
2261 | ZIL_STAT_INCR(zilog, zil_itx_copied_bytes, | |
2262 | lrw->lr_length); | |
b6ad9671 | 2263 | } else { |
34dc7c2f BB |
2264 | char *dbuf; |
2265 | int error; | |
2266 | ||
1b7c1e5c | 2267 | if (itx->itx_wr_state == WR_NEED_COPY) { |
428870ff | 2268 | dbuf = lr_buf + reclen; |
f63811f0 | 2269 | lrb->lrc_reclen += dlen; |
fb087146 AH |
2270 | ZIL_STAT_BUMP(zilog, zil_itx_needcopy_count); |
2271 | ZIL_STAT_INCR(zilog, zil_itx_needcopy_bytes, | |
f63811f0 | 2272 | dlen); |
34dc7c2f | 2273 | } else { |
1ce23dca | 2274 | ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT); |
34dc7c2f | 2275 | dbuf = NULL; |
fb087146 AH |
2276 | ZIL_STAT_BUMP(zilog, zil_itx_indirect_count); |
2277 | ZIL_STAT_INCR(zilog, zil_itx_indirect_bytes, | |
d1d7e268 | 2278 | lrw->lr_length); |
eda3fcd5 | 2279 | if (lwb->lwb_child_zio == NULL) { |
3afdc97d AM |
2280 | lwb->lwb_child_zio = zio_null(NULL, |
2281 | zilog->zl_spa, NULL, NULL, NULL, | |
eda3fcd5 AM |
2282 | ZIO_FLAG_CANFAIL); |
2283 | } | |
34dc7c2f | 2284 | } |
1ce23dca PS |
2285 | |
2286 | /* | |
eda3fcd5 AM |
2287 | * The "lwb_child_zio" we pass in will become a child of |
2288 | * "lwb_write_zio", when one is created, so one will be | |
2289 | * a parent of any zio's created by the "zl_get_data". | |
2290 | * This way "lwb_write_zio" will first wait for children | |
2291 | * block pointers before own writing, and then for their | |
2292 | * writing completion before the vdev cache flushing. | |
1ce23dca PS |
2293 | */ |
2294 | error = zilog->zl_get_data(itx->itx_private, | |
296a4a36 | 2295 | itx->itx_gen, lrwb, dbuf, lwb, |
eda3fcd5 | 2296 | lwb->lwb_child_zio); |
f63811f0 | 2297 | if (dbuf != NULL && error == 0) { |
3a185275 | 2298 | /* Zero any padding bytes in the last block. */ |
f63811f0 AM |
2299 | memset((char *)dbuf + lrwb->lr_length, 0, |
2300 | dlen - lrwb->lr_length); | |
2301 | } | |
1ce23dca | 2302 | |
3a7c3511 RY |
2303 | /* |
2304 | * Typically, the only return values we should see from | |
2305 | * ->zl_get_data() are 0, EIO, ENOENT, EEXIST or | |
2306 | * EALREADY. However, it is also possible to see other | |
2307 | * error values such as ENOSPC or EINVAL from | |
2308 | * dmu_read() -> dnode_hold() -> dnode_hold_impl() or | |
2309 | * ENXIO as well as a multitude of others from the | |
2310 | * block layer through dmu_buf_hold() -> dbuf_read() | |
2311 | * -> zio_wait(), as well as through dmu_read() -> | |
2312 | * dnode_hold() -> dnode_hold_impl() -> dbuf_read() -> | |
2313 | * zio_wait(). When these errors happen, we can assume | |
2314 | * that neither an immediate write nor an indirect | |
2315 | * write occurred, so we need to fall back to | |
2316 | * txg_wait_synced(). This is unusual, so we print to | |
2317 | * dmesg whenever one of these errors occurs. | |
2318 | */ | |
2319 | switch (error) { | |
2320 | case 0: | |
2321 | break; | |
2322 | default: | |
2323 | cmn_err(CE_WARN, "zil_lwb_commit() received " | |
2324 | "unexpected error %d from ->zl_get_data()" | |
2325 | ". Falling back to txg_wait_synced().", | |
2326 | error); | |
2327 | zfs_fallthrough; | |
2328 | case EIO: | |
eda3fcd5 AM |
2329 | txg_wait_synced(zilog->zl_dmu_pool, |
2330 | lr->lrc_txg); | |
3a7c3511 RY |
2331 | zfs_fallthrough; |
2332 | case ENOENT: | |
2333 | zfs_fallthrough; | |
2334 | case EEXIST: | |
2335 | zfs_fallthrough; | |
2336 | case EALREADY: | |
f63811f0 | 2337 | return; |
34dc7c2f BB |
2338 | } |
2339 | } | |
2340 | } | |
2341 | ||
f63811f0 AM |
2342 | lwb->lwb_nfilled += reclen + dlen; |
2343 | ASSERT3S(lwb->lwb_nfilled, <=, lwb->lwb_nused); | |
2344 | ASSERT0(P2PHASE(lwb->lwb_nfilled, sizeof (uint64_t))); | |
34dc7c2f BB |
2345 | } |
2346 | ||
2347 | itx_t * | |
58714c28 | 2348 | zil_itx_create(uint64_t txtype, size_t olrsize) |
34dc7c2f | 2349 | { |
58714c28 | 2350 | size_t itxsize, lrsize; |
34dc7c2f BB |
2351 | itx_t *itx; |
2352 | ||
2a27fd41 | 2353 | ASSERT3U(olrsize, >=, sizeof (lr_t)); |
58714c28 | 2354 | lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t); |
2a27fd41 | 2355 | ASSERT3U(lrsize, >=, olrsize); |
72841b9f | 2356 | itxsize = offsetof(itx_t, itx_lr) + lrsize; |
34dc7c2f | 2357 | |
72841b9f | 2358 | itx = zio_data_buf_alloc(itxsize); |
34dc7c2f BB |
2359 | itx->itx_lr.lrc_txtype = txtype; |
2360 | itx->itx_lr.lrc_reclen = lrsize; | |
34dc7c2f | 2361 | itx->itx_lr.lrc_seq = 0; /* defensive */ |
861166b0 | 2362 | memset((char *)&itx->itx_lr + olrsize, 0, lrsize - olrsize); |
572e2857 | 2363 | itx->itx_sync = B_TRUE; /* default is synchronous */ |
119a394a ED |
2364 | itx->itx_callback = NULL; |
2365 | itx->itx_callback_data = NULL; | |
72841b9f | 2366 | itx->itx_size = itxsize; |
34dc7c2f BB |
2367 | |
2368 | return (itx); | |
2369 | } | |
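/*
 * Usage sketch with a hypothetical 205-byte record: zil_itx_create()
 * rounds lrsize up to 208 (a uint64_t multiple), zeroes the 3 padding
 * bytes, and allocates offsetof(itx_t, itx_lr) + 208 bytes, keeping
 * the itx_size/lrc_reclen relationship asserted in zil_itx_destroy().
 */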
2370 | ||
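/*
 * Clone an itx so the remainder of a split WR_NEED_COPY write can be
 * carried into the next lwb; callbacks stay with the original itx.
 */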
f63811f0 AM |
2371 | static itx_t * |
2372 | zil_itx_clone(itx_t *oitx) | |
2373 | { | |
2a27fd41 AM |
2374 | ASSERT3U(oitx->itx_size, >=, sizeof (itx_t)); |
2375 | ASSERT3U(oitx->itx_size, ==, | |
2376 | offsetof(itx_t, itx_lr) + oitx->itx_lr.lrc_reclen); | |
2377 | ||
f63811f0 AM |
2378 | itx_t *itx = zio_data_buf_alloc(oitx->itx_size); |
2379 | memcpy(itx, oitx, oitx->itx_size); | |
2380 | itx->itx_callback = NULL; | |
2381 | itx->itx_callback_data = NULL; | |
2382 | return (itx); | |
2383 | } | |
2384 | ||
428870ff BB |
2385 | void |
2386 | zil_itx_destroy(itx_t *itx) | |
2387 | { | |
2a27fd41 AM |
2388 | ASSERT3U(itx->itx_size, >=, sizeof (itx_t)); |
2389 | ASSERT3U(itx->itx_lr.lrc_reclen, ==, | |
2390 | itx->itx_size - offsetof(itx_t, itx_lr)); | |
1ce23dca PS |
2391 | IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL); |
2392 | IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); | |
2393 | ||
2394 | if (itx->itx_callback != NULL) | |
2395 | itx->itx_callback(itx->itx_callback_data); | |
2396 | ||
72841b9f | 2397 | zio_data_buf_free(itx, itx->itx_size); |
428870ff BB |
2398 | } |
2399 | ||
572e2857 BB |
2400 | /* |
2401 | * Free up the sync and async itxs. The itxs_t has already been detached | |
2402 | * so no locks are needed. | |
2403 | */ | |
2404 | static void | |
23c13c7e | 2405 | zil_itxg_clean(void *arg) |
34dc7c2f | 2406 | { |
572e2857 BB |
2407 | itx_t *itx; |
2408 | list_t *list; | |
2409 | avl_tree_t *t; | |
2410 | void *cookie; | |
23c13c7e | 2411 | itxs_t *itxs = arg; |
572e2857 BB |
2412 | itx_async_node_t *ian; |
2413 | ||
2414 | list = &itxs->i_sync_list; | |
895e0313 | 2415 | while ((itx = list_remove_head(list)) != NULL) { |
1ce23dca PS |
2416 | /* |
2417 | * In the general case, commit itxs will not be found | |
2418 | * here, as they'll be committed to an lwb via | |
f63811f0 | 2419 | * zil_lwb_assign(), and free'd in that function. Having |
1ce23dca PS |
2420 | * said that, it is still possible for commit itxs to be |
2421 | * found here, due to the following race: | |
2422 | * | |
2423 | * - a thread calls zil_commit() which assigns the | |
2424 | * commit itx to a per-txg i_sync_list | |
2425 | * - zil_itxg_clean() is called (e.g. via spa_sync()) | |
2426 | * while the waiter is still on the i_sync_list | |
2427 | * | |
2428 | * There's nothing to prevent syncing the txg while the | |
2429 | * waiter is on the i_sync_list. This normally doesn't | |
2430 | * happen because spa_sync() is slower than zil_commit(), | |
2431 | * but if zil_commit() calls txg_wait_synced() (e.g. | |
2432 | * because zil_create() or zil_commit_writer_stall() is | |
2433 | * called) we will hit this case. | |
2434 | */ | |
2435 | if (itx->itx_lr.lrc_txtype == TX_COMMIT) | |
2436 | zil_commit_waiter_skip(itx->itx_private); | |
2437 | ||
19ea3d25 | 2438 | zil_itx_destroy(itx); |
572e2857 | 2439 | } |
34dc7c2f | 2440 | |
572e2857 BB |
2441 | cookie = NULL; |
2442 | t = &itxs->i_async_tree; | |
2443 | while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { | |
2444 | list = &ian->ia_list; | |
895e0313 | 2445 | while ((itx = list_remove_head(list)) != NULL) { |
1ce23dca PS |
2446 | /* commit itxs should never be on the async lists. */ |
2447 | ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); | |
19ea3d25 | 2448 | zil_itx_destroy(itx); |
572e2857 BB |
2449 | } |
2450 | list_destroy(list); | |
2451 | kmem_free(ian, sizeof (itx_async_node_t)); | |
2452 | } | |
2453 | avl_destroy(t); | |
34dc7c2f | 2454 | |
572e2857 BB |
2455 | kmem_free(itxs, sizeof (itxs_t)); |
2456 | } | |
34dc7c2f | 2457 | |
572e2857 BB |
2458 | static int |
2459 | zil_aitx_compare(const void *x1, const void *x2) | |
2460 | { | |
2461 | const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; | |
2462 | const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; | |
2463 | ||
ca577779 | 2464 | return (TREE_CMP(o1, o2)); |
34dc7c2f BB |
2465 | } |
2466 | ||
2467 | /* | |
572e2857 | 2468 | * Remove all async itxs with the given oid. |
34dc7c2f | 2469 | */ |
8e556c5e | 2470 | void |
572e2857 | 2471 | zil_remove_async(zilog_t *zilog, uint64_t oid) |
34dc7c2f | 2472 | { |
572e2857 | 2473 | uint64_t otxg, txg; |
2a27fd41 | 2474 | itx_async_node_t *ian, ian_search; |
572e2857 BB |
2475 | avl_tree_t *t; |
2476 | avl_index_t where; | |
34dc7c2f BB |
2477 | list_t clean_list; |
2478 | itx_t *itx; | |
2479 | ||
572e2857 | 2480 | ASSERT(oid != 0); |
34dc7c2f BB |
2481 | list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); |
2482 | ||
572e2857 BB |
2483 | if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ |
2484 | otxg = ZILTEST_TXG; | |
2485 | else | |
2486 | otxg = spa_last_synced_txg(zilog->zl_spa) + 1; | |
34dc7c2f | 2487 | |
572e2857 BB |
2488 | for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { |
2489 | itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; | |
2490 | ||
2491 | mutex_enter(&itxg->itxg_lock); | |
2492 | if (itxg->itxg_txg != txg) { | |
2493 | mutex_exit(&itxg->itxg_lock); | |
2494 | continue; | |
2495 | } | |
34dc7c2f | 2496 | |
572e2857 BB |
2497 | /* |
2498 | * Locate the object node and append its list. | |
2499 | */ | |
2500 | t = &itxg->itxg_itxs->i_async_tree; | |
2a27fd41 AM |
2501 | ian_search.ia_foid = oid; |
2502 | ian = avl_find(t, &ian_search, &where); | |
572e2857 BB |
2503 | if (ian != NULL) |
2504 | list_move_tail(&clean_list, &ian->ia_list); | |
2505 | mutex_exit(&itxg->itxg_lock); | |
2506 | } | |
895e0313 | 2507 | while ((itx = list_remove_head(&clean_list)) != NULL) { |
1ce23dca PS |
2508 | /* commit itxs should never be on the async lists. */ |
2509 | ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); | |
19ea3d25 | 2510 | zil_itx_destroy(itx); |
34dc7c2f BB |
2511 | } |
2512 | list_destroy(&clean_list); | |
2513 | } | |
2514 | ||
572e2857 BB |
2515 | void |
2516 | zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) | |
2517 | { | |
2518 | uint64_t txg; | |
2519 | itxg_t *itxg; | |
2520 | itxs_t *itxs, *clean = NULL; | |
2521 | ||
572e2857 BB |
2522 | /* |
2523 | * Ensure the data of a renamed file is committed before the rename. | |
2524 | */ | |
2525 | if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME) | |
2526 | zil_async_to_sync(zilog, itx->itx_oid); | |
2527 | ||
29809a6c | 2528 | if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) |
572e2857 BB |
2529 | txg = ZILTEST_TXG; |
2530 | else | |
2531 | txg = dmu_tx_get_txg(tx); | |
2532 | ||
2533 | itxg = &zilog->zl_itxg[txg & TXG_MASK]; | |
2534 | mutex_enter(&itxg->itxg_lock); | |
2535 | itxs = itxg->itxg_itxs; | |
2536 | if (itxg->itxg_txg != txg) { | |
2537 | if (itxs != NULL) { | |
2538 | /* | |
2539 | * The zil_clean callback hasn't got around to cleaning | |
2540 | * this itxg. Save the itxs for release below. | |
2541 | * This should be rare. | |
2542 | */ | |
55922e73 | 2543 | zfs_dbgmsg("zil_itx_assign: missed itx cleanup for " |
8e739b2c | 2544 | "txg %llu", (u_longlong_t)itxg->itxg_txg); |
572e2857 BB |
2545 | clean = itxg->itxg_itxs; |
2546 | } | |
572e2857 | 2547 | itxg->itxg_txg = txg; |
d1d7e268 | 2548 | itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), |
79c76d5b | 2549 | KM_SLEEP); |
572e2857 BB |
2550 | |
2551 | list_create(&itxs->i_sync_list, sizeof (itx_t), | |
2552 | offsetof(itx_t, itx_node)); | |
2553 | avl_create(&itxs->i_async_tree, zil_aitx_compare, | |
2554 | sizeof (itx_async_node_t), | |
2555 | offsetof(itx_async_node_t, ia_node)); | |
2556 | } | |
2557 | if (itx->itx_sync) { | |
2558 | list_insert_tail(&itxs->i_sync_list, itx); | |
572e2857 BB |
2559 | } else { |
2560 | avl_tree_t *t = &itxs->i_async_tree; | |
50c957f7 NB |
2561 | uint64_t foid = |
2562 | LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid); | |
572e2857 BB |
2563 | itx_async_node_t *ian; |
2564 | avl_index_t where; | |
2565 | ||
2566 | ian = avl_find(t, &foid, &where); | |
2567 | if (ian == NULL) { | |
d1d7e268 | 2568 | ian = kmem_alloc(sizeof (itx_async_node_t), |
79c76d5b | 2569 | KM_SLEEP); |
572e2857 BB |
2570 | list_create(&ian->ia_list, sizeof (itx_t), |
2571 | offsetof(itx_t, itx_node)); | |
2572 | ian->ia_foid = foid; | |
2573 | avl_insert(t, ian, where); | |
2574 | } | |
2575 | list_insert_tail(&ian->ia_list, itx); | |
2576 | } | |
2577 | ||
2578 | itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx); | |
1ce23dca PS |
2579 | |
2580 | /* | |
2581 | * We don't want to dirty the ZIL using ZILTEST_TXG, because | |
2582 | * zil_clean() will never be called using ZILTEST_TXG. Thus, we | |
2583 | * need to be careful to always dirty the ZIL using the "real" | |
2584 | * TXG (not itxg_txg) even when the SPA is frozen. | |
2585 | */ | |
2586 | zilog_dirty(zilog, dmu_tx_get_txg(tx)); | |
572e2857 BB |
2587 | mutex_exit(&itxg->itxg_lock); |
2588 | ||
2589 | /* Release the old itxs now we've dropped the lock */ | |
2590 | if (clean != NULL) | |
2591 | zil_itxg_clean(clean); | |
2592 | } | |
2593 | ||
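/*
 * Usage sketch (hypothetical caller, illustrative only):
 *
 *	itx_t *itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t));
 *	itx->itx_sync = B_FALSE;	(async: queued by object id)
 *	zil_itx_assign(zilog, itx, tx);	(filed under i_async_tree)
 *
 * A synchronous itx (itx_sync == B_TRUE, e.g. an O_DSYNC write) would
 * instead go straight onto the per-txg i_sync_list. Async itxs reach
 * the sync list only via zil_async_to_sync(), e.g. when the object is
 * later fsync'd; otherwise they are freed once their txg syncs.
 */
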
34dc7c2f BB |
2594 | /* |
2595 | * If there are any in-memory intent log transactions which have now been | |
29809a6c | 2596 | * synced then start up a taskq to free them. We should only do this after we |
e1cfd73f | 2597 | * have written out the uberblocks (i.e. txg has been committed) so that |
29809a6c MA |
2598 | * we don't inadvertently clean out in-memory log records that would be required | |
2599 | * by zil_commit(). | |
34dc7c2f BB |
2600 | */ |
2601 | void | |
572e2857 | 2602 | zil_clean(zilog_t *zilog, uint64_t synced_txg) |
34dc7c2f | 2603 | { |
572e2857 BB |
2604 | itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK]; |
2605 | itxs_t *clean_me; | |
34dc7c2f | 2606 | |
1ce23dca PS |
2607 | ASSERT3U(synced_txg, <, ZILTEST_TXG); |
2608 | ||
572e2857 BB |
2609 | mutex_enter(&itxg->itxg_lock); |
2610 | if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) { | |
2611 | mutex_exit(&itxg->itxg_lock); | |
2612 | return; | |
2613 | } | |
2614 | ASSERT3U(itxg->itxg_txg, <=, synced_txg); | |
a032ac4b | 2615 | ASSERT3U(itxg->itxg_txg, !=, 0); |
572e2857 BB |
2616 | clean_me = itxg->itxg_itxs; |
2617 | itxg->itxg_itxs = NULL; | |
2618 | itxg->itxg_txg = 0; | |
2619 | mutex_exit(&itxg->itxg_lock); | |
2620 | /* | |
2621 | * Preferably start a task queue to free up the old itxs, but | |
2622 | * if taskq_dispatch can't allocate resources to do that, then | |
2623 | * free them in-line. This should be rare. Note, using TQ_SLEEP | |
2624 | * created a bad performance problem. | |
2625 | */ | |
a032ac4b BB |
2626 | ASSERT3P(zilog->zl_dmu_pool, !=, NULL); |
2627 | ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL); | |
2628 | taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq, | |
23c13c7e | 2629 | zil_itxg_clean, clean_me, TQ_NOSLEEP); |
a032ac4b | 2630 | if (id == TASKQID_INVALID) |
572e2857 BB |
2631 | zil_itxg_clean(clean_me); |
2632 | } | |
2633 | ||
2634 | /* | |
1ce23dca PS |
2635 | * This function will traverse the queue of itxs that need to be |
2636 | * committed, and move them onto the ZIL's zl_itx_commit_list. | |
572e2857 | 2637 | */ |
233425a1 | 2638 | static uint64_t |
572e2857 BB |
2639 | zil_get_commit_list(zilog_t *zilog) |
2640 | { | |
233425a1 | 2641 | uint64_t otxg, txg, wtxg = 0; |
572e2857 | 2642 | list_t *commit_list = &zilog->zl_itx_commit_list; |
572e2857 | 2643 | |
1b2b0aca | 2644 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca | 2645 | |
572e2857 BB |
2646 | if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ |
2647 | otxg = ZILTEST_TXG; | |
2648 | else | |
2649 | otxg = spa_last_synced_txg(zilog->zl_spa) + 1; | |
2650 | ||
55922e73 GW |
2651 | /* |
2652 | * This is inherently racy, since there is nothing to prevent | |
2653 | * the last synced txg from changing. That's okay since we'll | |
2654 | * only commit things in the future. | |
2655 | */ | |
572e2857 BB |
2656 | for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { |
2657 | itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; | |
2658 | ||
2659 | mutex_enter(&itxg->itxg_lock); | |
2660 | if (itxg->itxg_txg != txg) { | |
2661 | mutex_exit(&itxg->itxg_lock); | |
2662 | continue; | |
2663 | } | |
2664 | ||
55922e73 GW |
2665 | /* |
2666 | * If we're adding itx records to the zl_itx_commit_list, | |
2667 | * then the zil better be dirty in this "txg". We can assert | |
2668 | * that here since we're holding the itxg_lock which will | |
2669 | * prevent spa_sync from cleaning it. Once we add the itxs | |
2671 | * to the zl_itx_commit_list we must commit them to disk even | |
2671 | * if it's unnecessary (i.e. the txg was synced). | |
2672 | */ | |
2673 | ASSERT(zilog_is_dirty_in_txg(zilog, txg) || | |
2674 | spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); | |
233425a1 | 2675 | list_t *sync_list = &itxg->itxg_itxs->i_sync_list; |
eff77a80 | 2676 | itx_t *itx = NULL; |
233425a1 AM |
2677 | if (unlikely(zilog->zl_suspend > 0)) { |
2678 | /* | |
2679 | * ZIL was just suspended, but we lost the race. | |
2680 | * Allow all earlier itxs to be committed, but ask | |
2681 | * caller to do txg_wait_synced(txg) for any new. | |
2682 | */ | |
2683 | if (!list_is_empty(sync_list)) | |
2684 | wtxg = MAX(wtxg, txg); | |
2685 | } else { | |
eff77a80 | 2686 | itx = list_head(sync_list); |
233425a1 AM |
2687 | list_move_tail(commit_list, sync_list); |
2688 | } | |
572e2857 BB |
2689 | |
2690 | mutex_exit(&itxg->itxg_lock); | |
eff77a80 AM |
2691 | |
2692 | while (itx != NULL) { | |
2693 | uint64_t s = zil_itx_full_size(itx); | |
2694 | zilog->zl_cur_size += s; | |
2695 | zilog->zl_cur_left += s; | |
2696 | s = zil_itx_record_size(itx); | |
2697 | zilog->zl_cur_max = MAX(zilog->zl_cur_max, s); | |
2698 | itx = list_next(commit_list, itx); | |
2699 | } | |
572e2857 | 2700 | } |
233425a1 | 2701 | return (wtxg); |
572e2857 BB |
2702 | } |
2703 | ||
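/*
 * Note on the accounting loop above: zl_cur_size and zl_cur_left both
 * accumulate the full size of every itx moved onto the commit list
 * (zil_itx_full_size()), while zl_cur_max tracks only the largest
 * single record (zil_itx_record_size()). These counters feed the lwb
 * sizing heuristics consumed by zil_burst_done() below; the exact
 * difference between the "full" and "record" sizes lives in those
 * helpers, which are defined elsewhere in this file.
 */
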
/*
 * Move the async itxs for a specified object onto the sync lists, so
 * that they will be committed.
 */
void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
	uint64_t otxg, txg;
	itx_async_node_t *ian, ian_search;
	avl_tree_t *t;
	avl_index_t where;

	if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
		otxg = ZILTEST_TXG;
	else
		otxg = spa_last_synced_txg(zilog->zl_spa) + 1;

	/*
	 * This is inherently racy, since there is nothing to prevent
	 * the last synced txg from changing.
	 */
	for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
		itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];

		mutex_enter(&itxg->itxg_lock);
		if (itxg->itxg_txg != txg) {
			mutex_exit(&itxg->itxg_lock);
			continue;
		}

		/*
		 * If a foid is specified then find that node and append its
		 * list. Otherwise walk the tree appending all the lists
		 * to the sync list. We add to the end rather than the
		 * beginning to ensure the create has happened.
		 */
		t = &itxg->itxg_itxs->i_async_tree;
		if (foid != 0) {
			ian_search.ia_foid = foid;
			ian = avl_find(t, &ian_search, &where);
			if (ian != NULL) {
				list_move_tail(&itxg->itxg_itxs->i_sync_list,
				    &ian->ia_list);
			}
		} else {
			void *cookie = NULL;

			while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
				list_move_tail(&itxg->itxg_itxs->i_sync_list,
				    &ian->ia_list);
				list_destroy(&ian->ia_list);
				kmem_free(ian, sizeof (itx_async_node_t));
			}
		}
		mutex_exit(&itxg->itxg_lock);
	}
}

/*
 * This function will prune commit itxs that are at the head of the
 * commit list (it won't prune past the first non-commit itx), and
 * either: a) attach them to the last lwb that's still pending
 * completion, or b) skip them altogether.
 *
 * This is used as a performance optimization to prevent commit itxs
 * from generating new lwbs when it's unnecessary to do so.
 */
static void
zil_prune_commit_list(zilog_t *zilog)
{
	itx_t *itx;

	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));

	while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
		lr_t *lrc = &itx->itx_lr;
		if (lrc->lrc_txtype != TX_COMMIT)
			break;

		mutex_enter(&zilog->zl_lock);

		lwb_t *last_lwb = zilog->zl_last_lwb_opened;
		if (last_lwb == NULL ||
		    last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) {
			/*
			 * All of the itxs this waiter was waiting on
			 * must have already completed (or there were
			 * never any itxs for it to wait on), so it's
			 * safe to skip this waiter and mark it done.
			 */
			zil_commit_waiter_skip(itx->itx_private);
		} else {
			zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
		}

		mutex_exit(&zilog->zl_lock);

		list_remove(&zilog->zl_itx_commit_list, itx);
		zil_itx_destroy(itx);
	}

	IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
}

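/*
 * Worked example of the pruning rule above: if the commit list holds
 * [C1, C2, W, C3], where C* are TX_COMMIT itxs and W is a regular
 * write itx, then only C1 and C2 are pruned -- either attached to the
 * last pending lwb or skipped -- while W and C3 remain on the list,
 * because pruning stops at the first non-commit itx.
 */
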
static void
zil_commit_writer_stall(zilog_t *zilog)
{
	/*
	 * When zio_alloc_zil() fails to allocate the next lwb block on
	 * disk, we must call txg_wait_synced() to ensure all of the
	 * lwbs in the zilog's zl_lwb_list are synced and then freed (in
	 * zil_sync()), such that any subsequent ZIL writer (i.e. a call
	 * to zil_process_commit_list()) will have to call zil_create(),
	 * and start a new ZIL chain.
	 *
	 * Since zio_alloc_zil() failed, the lwb that was previously
	 * issued does not have a pointer to the "next" lwb on disk.
	 * Thus, if another ZIL writer thread were to allocate the "next"
	 * on-disk lwb, that block could be leaked in the event of a
	 * crash (because the previous lwb on-disk would not point to
	 * it).
	 *
	 * We must hold the zilog's zl_issuer_lock while we do this, to
	 * ensure no new threads enter zil_process_commit_list() until
	 * all lwbs in the zl_lwb_list have been synced and freed
	 * (which is achieved via the txg_wait_synced() call).
	 */
	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
	txg_wait_synced(zilog->zl_dmu_pool, 0);
	ASSERT(list_is_empty(&zilog->zl_lwb_list));
}

static void
zil_burst_done(zilog_t *zilog)
{
	if (!list_is_empty(&zilog->zl_itx_commit_list) ||
	    zilog->zl_cur_size == 0)
		return;

	if (zilog->zl_parallel)
		zilog->zl_parallel--;

	uint_t r = (zilog->zl_prev_rotor + 1) & (ZIL_BURSTS - 1);
	zilog->zl_prev_rotor = r;
	zilog->zl_prev_opt[r] = zil_lwb_plan(zilog, zilog->zl_cur_size,
	    &zilog->zl_prev_min[r]);

	zilog->zl_cur_size = 0;
	zilog->zl_cur_max = 0;
	zilog->zl_cur_left = 0;
}

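/*
 * The rotor above maintains a small history (ZIL_BURSTS entries) of
 * per-burst planning results in zl_prev_opt[] and zl_prev_min[],
 * derived by zil_lwb_plan() from the just-finished burst's total size.
 * This way, later lwb allocations can size new blocks from recent
 * burst behavior rather than from a single sample; zil_lwb_plan()
 * itself is defined elsewhere in this file.
 */
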
/*
 * This function will traverse the commit list, creating new lwbs as
 * needed, and committing the itxs from the commit list to these newly
 * created lwbs. Additionally, as a new lwb is created, the previous
 * lwb will be issued to the zio layer to be written to disk.
 */
static void
zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
{
	spa_t *spa = zilog->zl_spa;
	list_t nolwb_itxs;
	list_t nolwb_waiters;
	lwb_t *lwb, *plwb;
	itx_t *itx;

	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));

	/*
	 * Return if there's nothing to commit before we dirty the fs by
	 * calling zil_create().
	 */
	if (list_is_empty(&zilog->zl_itx_commit_list))
		return;

	list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
	list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
	    offsetof(zil_commit_waiter_t, zcw_node));

	lwb = list_tail(&zilog->zl_lwb_list);
	if (lwb == NULL) {
		lwb = zil_create(zilog);
	} else {
		/*
		 * Activate SPA_FEATURE_ZILSAXATTR for the cases where ZIL will
		 * have already been created (zl_lwb_list not empty).
		 */
		zil_commit_activate_saxattr_feature(zilog);
		ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
		    lwb->lwb_state == LWB_STATE_OPENED);

		/*
		 * If the lwb is still opened, it means the workload is really
		 * multi-threaded and we won the chance of write aggregation.
		 * If it is not opened yet, but the previous lwb is still not
		 * flushed, it still means the workload is multi-threaded, but
		 * there was too much time between the commits to aggregate,
		 * so we try aggregation next time, but without too much hope.
		 */
		if (lwb->lwb_state == LWB_STATE_OPENED) {
			zilog->zl_parallel = ZIL_BURSTS;
		} else if ((plwb = list_prev(&zilog->zl_lwb_list, lwb))
		    != NULL && plwb->lwb_state != LWB_STATE_FLUSH_DONE) {
			zilog->zl_parallel = MAX(zilog->zl_parallel,
			    ZIL_BURSTS / 2);
		}
	}

	while ((itx = list_remove_head(&zilog->zl_itx_commit_list)) != NULL) {
		lr_t *lrc = &itx->itx_lr;
		uint64_t txg = lrc->lrc_txg;

		ASSERT3U(txg, !=, 0);

		if (lrc->lrc_txtype == TX_COMMIT) {
			DTRACE_PROBE2(zil__process__commit__itx,
			    zilog_t *, zilog, itx_t *, itx);
		} else {
			DTRACE_PROBE2(zil__process__normal__itx,
			    zilog_t *, zilog, itx_t *, itx);
		}

		boolean_t synced = txg <= spa_last_synced_txg(spa);
		boolean_t frozen = txg > spa_freeze_txg(spa);

		/*
		 * If the txg of this itx has already been synced out, then
		 * we don't need to commit this itx to an lwb. This is
		 * because the data of this itx will have already been
		 * written to the main pool. This is inherently racy, and
		 * it's still ok to commit an itx whose txg has already
		 * been synced; this will result in a write that's
		 * unnecessary, but will do no harm.
		 *
		 * With that said, we always want to commit TX_COMMIT itxs
		 * to an lwb, regardless of whether or not that itx's txg
		 * has been synced out. We do this to ensure any OPENED lwb
		 * will always have at least one zil_commit_waiter_t linked
		 * to the lwb.
		 *
		 * As a counter-example, if we skipped TX_COMMIT itxs
		 * whose txg had already been synced, the following
		 * situation could occur if we happened to be racing with
		 * spa_sync:
		 *
		 * 1. We commit a non-TX_COMMIT itx to an lwb, where the
		 *    itx's txg is 10 and the last synced txg is 9.
		 * 2. spa_sync finishes syncing out txg 10.
		 * 3. We move to the next itx in the list, it's a TX_COMMIT
		 *    whose txg is 10, so we skip it rather than committing
		 *    it to the lwb used in (1).
		 *
		 * If the itx that is skipped in (3) is the last TX_COMMIT
		 * itx in the commit list, then it's possible for the lwb
		 * used in (1) to remain in the OPENED state indefinitely.
		 *
		 * To prevent the above scenario from occurring, ensuring
		 * that once an lwb is OPENED it will transition to ISSUED
		 * and eventually DONE, we always commit TX_COMMIT itxs to
		 * an lwb here, even if that itx's txg has already been
		 * synced.
		 *
		 * Finally, if the pool is frozen, we _always_ commit the
		 * itx. The point of freezing the pool is to prevent data
		 * from being written to the main pool via spa_sync, and
		 * instead rely solely on the ZIL to persistently store the
		 * data; i.e. when the pool is frozen, the last synced txg
		 * value can't be trusted.
		 */
		if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) {
			if (lwb != NULL) {
				lwb = zil_lwb_assign(zilog, lwb, itx, ilwbs);
				if (lwb == NULL) {
					list_insert_tail(&nolwb_itxs, itx);
				} else if ((zcw->zcw_lwb != NULL &&
				    zcw->zcw_lwb != lwb) || zcw->zcw_done) {
					/*
					 * Our lwb is done; leave the rest of
					 * the itx list to somebody else who
					 * cares.
					 */
					zilog->zl_parallel = ZIL_BURSTS;
					zilog->zl_cur_left -=
					    zil_itx_full_size(itx);
					break;
				}
			} else {
				if (lrc->lrc_txtype == TX_COMMIT) {
					zil_commit_waiter_link_nolwb(
					    itx->itx_private, &nolwb_waiters);
				}
				list_insert_tail(&nolwb_itxs, itx);
			}
			zilog->zl_cur_left -= zil_itx_full_size(itx);
		} else {
			ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
			zilog->zl_cur_left -= zil_itx_full_size(itx);
			zil_itx_destroy(itx);
		}
	}

	if (lwb == NULL) {
		/*
		 * This indicates zio_alloc_zil() failed to allocate the
		 * "next" lwb on-disk. When this happens, we must stall
		 * the ZIL write pipeline; see the comment within
		 * zil_commit_writer_stall() for more details.
		 */
		while ((lwb = list_remove_head(ilwbs)) != NULL)
			zil_lwb_write_issue(zilog, lwb);
		zil_commit_writer_stall(zilog);

		/*
		 * Additionally, we have to signal and mark the "nolwb"
		 * waiters as "done" here, since without an lwb, we
		 * can't do this via zil_lwb_flush_vdevs_done() like
		 * normal.
		 */
		zil_commit_waiter_t *zcw;
		while ((zcw = list_remove_head(&nolwb_waiters)) != NULL)
			zil_commit_waiter_skip(zcw);

		/*
		 * And finally, we have to destroy the itxs that
		 * couldn't be committed to an lwb; this will also call
		 * the itx's callback if one exists for the itx.
		 */
		while ((itx = list_remove_head(&nolwb_itxs)) != NULL)
			zil_itx_destroy(itx);
	} else {
		ASSERT(list_is_empty(&nolwb_waiters));
		ASSERT3P(lwb, !=, NULL);
		ASSERT(lwb->lwb_state == LWB_STATE_NEW ||
		    lwb->lwb_state == LWB_STATE_OPENED);

		/*
		 * At this point, the ZIL block pointed at by the "lwb"
		 * variable is in "new" or "opened" state.
		 *
		 * If it's "new", then no itxs have been committed to it, so
		 * there's no point in issuing its zio (i.e. it's "empty").
		 *
		 * If it's "opened", then it contains one or more itxs that
		 * eventually need to be committed to stable storage. In
		 * this case we intentionally do not issue the lwb's zio
		 * to disk yet, and instead rely on one of the following
		 * two mechanisms for issuing the zio:
		 *
		 * 1. Ideally, there will be more ZIL activity occurring on
		 *    the system, such that this function will be immediately
		 *    called again by a different thread and this lwb will be
		 *    closed by zil_lwb_assign(). This way, the lwb will be
		 *    "full" when it is issued to disk, and we'll make use of
		 *    the lwb's size the best we can.
		 *
		 * 2. If there isn't sufficient ZIL activity occurring on
		 *    the system, zil_commit_waiter() will close it and issue
		 *    the zio. If this occurs, the lwb is not guaranteed
		 *    to be "full" by the time its zio is issued, which means
		 *    the size of the lwb was "too large" given the amount
		 *    of ZIL activity occurring on the system at that time.
		 *
		 * We do this for a couple of reasons:
		 *
		 * 1. To try and reduce the number of IOPs needed to
		 *    write the same number of itxs. If an lwb has space
		 *    available in its buffer for more itxs, and more itxs
		 *    will be committed relatively soon (relative to the
		 *    latency of performing a write), then it's beneficial
		 *    to wait for these "next" itxs. This way, more itxs
		 *    can be committed to stable storage with fewer writes.
		 *
		 * 2. To try and use the largest lwb block size that the
		 *    incoming rate of itxs can support. Again, this is to
		 *    try and pack as many itxs into as few lwbs as
		 *    possible, without significantly impacting the latency
		 *    of each individual itx.
		 */
		if (lwb->lwb_state == LWB_STATE_OPENED && !zilog->zl_parallel) {
			zil_burst_done(zilog);
			list_insert_tail(ilwbs, lwb);
			lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
			if (lwb == NULL) {
				while ((lwb = list_remove_head(ilwbs)) != NULL)
					zil_lwb_write_issue(zilog, lwb);
				zil_commit_writer_stall(zilog);
			}
		}
	}
}

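/*
 * For reference, the lwb states encountered above form a pipeline that,
 * judging by the states referenced throughout this file, advances
 * roughly as:
 *
 *	NEW -> OPENED -> CLOSED -> READY -> ISSUED -> WRITE_DONE ->
 *	FLUSH_DONE
 *
 * zil_process_commit_list() only ever leaves an lwb in the NEW or
 * OPENED state; the later transitions happen in the write/flush
 * completion path (e.g. zil_lwb_flush_vdevs_done()).
 */
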
/*
 * This function is responsible for ensuring the passed in commit waiter
 * (and associated commit itx) is committed to an lwb. If the waiter is
 * not already committed to an lwb, all itxs in the zilog's queue of
 * itxs will be processed. The assumption is the passed in waiter's
 * commit itx will be found in the queue just like the other non-commit
 * itxs, such that when the entire queue is processed, the waiter will
 * have been committed to an lwb.
 *
 * The lwb associated with the passed in waiter is not guaranteed to
 * have been issued by the time this function completes. If the lwb is
 * not issued, we rely on future calls to zil_commit_writer() to issue
 * the lwb, or the timeout mechanism found in zil_commit_waiter().
 */
static uint64_t
zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	list_t ilwbs;
	lwb_t *lwb;
	uint64_t wtxg = 0;

	ASSERT(!MUTEX_HELD(&zilog->zl_lock));
	ASSERT(spa_writeable(zilog->zl_spa));

	list_create(&ilwbs, sizeof (lwb_t), offsetof(lwb_t, lwb_issue_node));
	mutex_enter(&zilog->zl_issuer_lock);

	if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
		/*
		 * It's possible that, while we were waiting to acquire
		 * the "zl_issuer_lock", another thread committed this
		 * waiter to an lwb. If that occurs, we bail out early,
		 * without processing any of the zilog's queue of itxs.
		 *
		 * On certain workloads and system configurations, the
		 * "zl_issuer_lock" can become highly contended. In an
		 * attempt to reduce this contention, we immediately drop
		 * the lock if the waiter has already been processed.
		 *
		 * We've measured this optimization to reduce CPU spent
		 * contending on this lock by up to 5%, using a system
		 * with 32 CPUs, low latency storage (~50 usec writes),
		 * and 1024 threads performing sync writes.
		 */
		goto out;
	}

	ZIL_STAT_BUMP(zilog, zil_commit_writer_count);

	wtxg = zil_get_commit_list(zilog);
	zil_prune_commit_list(zilog);
	zil_process_commit_list(zilog, zcw, &ilwbs);

out:
	mutex_exit(&zilog->zl_issuer_lock);
	while ((lwb = list_remove_head(&ilwbs)) != NULL)
		zil_lwb_write_issue(zilog, lwb);
	list_destroy(&ilwbs);
	return (wtxg);
}

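/*
 * Note that zil_commit_writer() only collects lwbs to issue onto the
 * local "ilwbs" list while holding zl_issuer_lock; the actual
 * zil_lwb_write_issue() calls happen after the lock is dropped. This
 * keeps the (potentially slow) zio issue path out of the issuer lock's
 * critical section, reducing the contention described in the comment
 * above.
 */
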
static void
zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
	ASSERT(MUTEX_HELD(&zcw->zcw_lock));
	ASSERT3B(zcw->zcw_done, ==, B_FALSE);

	lwb_t *lwb = zcw->zcw_lwb;
	ASSERT3P(lwb, !=, NULL);
	ASSERT3S(lwb->lwb_state, !=, LWB_STATE_NEW);

	/*
	 * If the lwb has already been issued by another thread, we can
	 * immediately return since there's no work to be done (the
	 * point of this function is to issue the lwb). Additionally, we
	 * do this prior to acquiring the zl_issuer_lock, to avoid
	 * acquiring it when it's not necessary to do so.
	 */
	if (lwb->lwb_state != LWB_STATE_OPENED)
		return;

	/*
	 * In order to call zil_lwb_write_close() we must hold the
	 * zilog's "zl_issuer_lock". We can't simply acquire that lock,
	 * since we're already holding the commit waiter's "zcw_lock",
	 * and those two locks are acquired in the opposite order
	 * elsewhere.
	 */
	mutex_exit(&zcw->zcw_lock);
	mutex_enter(&zilog->zl_issuer_lock);
	mutex_enter(&zcw->zcw_lock);

	/*
	 * Since we just dropped and re-acquired the commit waiter's
	 * lock, we have to re-check to see if the waiter was marked
	 * "done" during that process. If the waiter was marked "done",
	 * the "lwb" pointer is no longer valid (it can be free'd after
	 * the waiter is marked "done"), so without this check we could
	 * wind up with a use-after-free error below.
	 */
	if (zcw->zcw_done) {
		mutex_exit(&zilog->zl_issuer_lock);
		return;
	}

	ASSERT3P(lwb, ==, zcw->zcw_lwb);

	/*
	 * We've already checked this above, but since we hadn't acquired
	 * the zilog's zl_issuer_lock, we have to perform this check a
	 * second time while holding the lock.
	 *
	 * We don't need to hold the zl_lock since the lwb cannot transition
	 * from OPENED to CLOSED while we hold the zl_issuer_lock. The lwb
	 * _can_ transition from CLOSED to DONE, but it's OK to race with
	 * that transition since we treat the lwb the same, whether it's in
	 * the CLOSED, ISSUED or DONE states.
	 *
	 * The important thing is that we treat the lwb differently depending
	 * on if it's OPENED or CLOSED, and block any other threads that might
	 * attempt to close/issue this lwb. For that reason we hold the
	 * zl_issuer_lock when checking the lwb_state; we must not call
	 * zil_lwb_write_close() if the lwb had already been closed/issued.
	 *
	 * See the comment above the lwb_state_t structure definition for
	 * more details on the lwb states, and locking requirements.
	 */
	if (lwb->lwb_state != LWB_STATE_OPENED) {
		mutex_exit(&zilog->zl_issuer_lock);
		return;
	}

	/*
	 * We do not need zcw_lock once we hold zl_issuer_lock and know the
	 * lwb is still open. But we have to drop it to avoid a deadlock in
	 * case a callback of a zio issued by zil_lwb_write_issue() tries to
	 * acquire it while zil_lwb_write_issue() is blocked attempting to
	 * issue the next lwb it found in the LWB_STATE_READY state.
	 */
	mutex_exit(&zcw->zcw_lock);

	/*
	 * As described in the comments above zil_commit_waiter() and
	 * zil_process_commit_list(), we need to issue this lwb's zio
	 * since we've reached the commit waiter's timeout and it still
	 * hasn't been issued.
	 */
	zil_burst_done(zilog);
	lwb_t *nlwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);

	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED);

	if (nlwb == NULL) {
		/*
		 * When zil_lwb_write_close() returns NULL, this
		 * indicates zio_alloc_zil() failed to allocate the
		 * "next" lwb on-disk. When this occurs, the ZIL write
		 * pipeline must be stalled; see the comment within the
		 * zil_commit_writer_stall() function for more details.
		 */
		zil_lwb_write_issue(zilog, lwb);
		zil_commit_writer_stall(zilog);
		mutex_exit(&zilog->zl_issuer_lock);
	} else {
		mutex_exit(&zilog->zl_issuer_lock);
		zil_lwb_write_issue(zilog, lwb);
	}
	mutex_enter(&zcw->zcw_lock);
}

/*
 * This function is responsible for performing the following two tasks:
 *
 * 1. its primary responsibility is to block until the given "commit
 *    waiter" is considered "done".
 *
 * 2. its secondary responsibility is to issue the zio for the lwb that
 *    the given "commit waiter" is waiting on, if this function has
 *    waited "long enough" and the lwb is still in the "open" state.
 *
 * Given a sufficient amount of itxs being generated and written using
 * the ZIL, the lwb's zio will be issued via the zil_lwb_assign()
 * function. If this does not occur, this secondary responsibility will
 * ensure the lwb is issued even if there is no other synchronous
 * activity on the system.
 *
 * For more details, see zil_process_commit_list(); more specifically,
 * the comment at the bottom of that function.
 */
static void
zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	ASSERT(!MUTEX_HELD(&zilog->zl_lock));
	ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
	ASSERT(spa_writeable(zilog->zl_spa));

	mutex_enter(&zcw->zcw_lock);

	/*
	 * The timeout is scaled based on the lwb latency to avoid
	 * significantly impacting the latency of each individual itx.
	 * For more details, see the comment at the bottom of the
	 * zil_process_commit_list() function.
	 */
	int pct = MAX(zfs_commit_timeout_pct, 1);
	hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
	hrtime_t wakeup = gethrtime() + sleep;
	boolean_t timedout = B_FALSE;

	while (!zcw->zcw_done) {
		ASSERT(MUTEX_HELD(&zcw->zcw_lock));

		lwb_t *lwb = zcw->zcw_lwb;

		/*
		 * Usually, the waiter will have a non-NULL lwb field here,
		 * but it's possible for it to be NULL as a result of
		 * zil_commit() racing with spa_sync().
		 *
		 * When zil_clean() is called, it's possible for the itxg
		 * list (which may be cleaned via a taskq) to contain
		 * commit itxs. When this occurs, the commit waiters linked
		 * off of these commit itxs will not be committed to an
		 * lwb. Additionally, these commit waiters will not be
		 * marked done until zil_commit_waiter_skip() is called via
		 * zil_itxg_clean().
		 *
		 * Thus, it's possible for this commit waiter (i.e. the
		 * "zcw" variable) to be found in this "in between" state;
		 * where its "zcw_lwb" field is NULL, and it hasn't yet
		 * been skipped, so its "zcw_done" field is still B_FALSE.
		 */
		IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_NEW);

		if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
			ASSERT3B(timedout, ==, B_FALSE);

			/*
			 * If the lwb hasn't been issued yet, then we
			 * need to wait with a timeout, in case this
			 * function needs to issue the lwb after the
			 * timeout is reached; responsibility (2) from
			 * the comment above this function.
			 */
			int rc = cv_timedwait_hires(&zcw->zcw_cv,
			    &zcw->zcw_lock, wakeup, USEC2NSEC(1),
			    CALLOUT_FLAG_ABSOLUTE);

			if (rc != -1 || zcw->zcw_done)
				continue;

			timedout = B_TRUE;
			zil_commit_waiter_timeout(zilog, zcw);

			if (!zcw->zcw_done) {
				/*
				 * If the commit waiter has already been
				 * marked "done", it's possible for the
				 * waiter's lwb structure to have already
				 * been freed. Thus, we can only reliably
				 * make these assertions if the waiter
				 * isn't done.
				 */
				ASSERT3P(lwb, ==, zcw->zcw_lwb);
				ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
			}
		} else {
			/*
			 * If the lwb isn't open, then it must have already
			 * been issued. In that case, there's no need to
			 * use a timeout when waiting for the lwb to
			 * complete.
			 *
			 * Additionally, if the lwb is NULL, the waiter
			 * will soon be signaled and marked done via
			 * zil_clean() and zil_itxg_clean(), so no timeout
			 * is required.
			 */

			IMPLY(lwb != NULL,
			    lwb->lwb_state == LWB_STATE_CLOSED ||
			    lwb->lwb_state == LWB_STATE_READY ||
			    lwb->lwb_state == LWB_STATE_ISSUED ||
			    lwb->lwb_state == LWB_STATE_WRITE_DONE ||
			    lwb->lwb_state == LWB_STATE_FLUSH_DONE);
			cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
		}
	}

	mutex_exit(&zcw->zcw_lock);
}

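/*
 * Example of the timeout computation above (values are illustrative,
 * assuming zfs_commit_timeout_pct is 5, its usual default): with a
 * previously observed lwb latency of 1 ms, the waiter sleeps for
 * (1000000 ns * 5) / 100 = 50000 ns = 50 usec before force-issuing
 * the lwb via zil_commit_waiter_timeout().
 */
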
static zil_commit_waiter_t *
zil_alloc_commit_waiter(void)
{
	zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);

	cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
	list_link_init(&zcw->zcw_node);
	zcw->zcw_lwb = NULL;
	zcw->zcw_done = B_FALSE;
	zcw->zcw_zio_error = 0;

	return (zcw);
}

static void
zil_free_commit_waiter(zil_commit_waiter_t *zcw)
{
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	ASSERT3B(zcw->zcw_done, ==, B_TRUE);
	mutex_destroy(&zcw->zcw_lock);
	cv_destroy(&zcw->zcw_cv);
	kmem_cache_free(zil_zcw_cache, zcw);
}

/*
 * This function is used to create a TX_COMMIT itx and assign it. This
 * way, it will be linked into the ZIL's list of synchronous itxs, and
 * then later committed to an lwb (or skipped) when
 * zil_process_commit_list() is called.
 */
static void
zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
	dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);

	/*
	 * Since we are not going to create any new dirty data, and we
	 * can even help with clearing the existing dirty data, we
	 * should not be subject to the dirty data based delays. We
	 * use TXG_NOTHROTTLE to bypass the delay mechanism.
	 */
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));

	itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
	itx->itx_sync = B_TRUE;
	itx->itx_private = zcw;

	zil_itx_assign(zilog, itx, tx);

	dmu_tx_commit(tx);
}

/*
 * Commit ZFS Intent Log transactions (itxs) to stable storage.
 *
 * When writing ZIL transactions to the on-disk representation of the
 * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
 * itxs can be committed to a single lwb. Once a lwb is written and
 * committed to stable storage (i.e. the lwb is written, and vdevs have
 * been flushed), each itx that was committed to that lwb is also
 * considered to be committed to stable storage.
 *
 * When an itx is committed to an lwb, the log record (lr_t) contained
 * by the itx is copied into the lwb's zio buffer, and once this buffer
 * is written to disk, it becomes an on-disk ZIL block.
 *
 * As itxs are generated, they're inserted into the ZIL's queue of
 * uncommitted itxs. The semantics of zil_commit() are such that it will
 * block until all itxs that were in the queue when it was called are
 * committed to stable storage.
 *
 * If "foid" is zero, this means all "synchronous" and "asynchronous"
 * itxs, for all objects in the dataset, will be committed to stable
 * storage prior to zil_commit() returning. If "foid" is non-zero, all
 * "synchronous" itxs for all objects, but only "asynchronous" itxs
 * that correspond to the foid passed in, will be committed to stable
 * storage prior to zil_commit() returning.
 *
 * Generally speaking, when zil_commit() is called, the consumer doesn't
 * actually care about _all_ of the uncommitted itxs. Instead, they're
 * simply waiting for a specific itx to be committed to disk, but the
 * interface(s) for interacting with the ZIL don't allow such
 * fine-grained communication. A better interface would allow a consumer
 * to create and assign an itx, and then pass a reference to this itx to
 * zil_commit(); such that zil_commit() would return as soon as that
 * specific itx was committed to disk (instead of waiting for _all_
 * itxs to be committed).
 *
 * When a thread calls zil_commit() a special "commit itx" will be
 * generated, along with a corresponding "waiter" for this commit itx.
 * zil_commit() will wait on this waiter's CV, such that when the waiter
 * is marked done, and signaled, zil_commit() will return.
 *
 * This commit itx is inserted into the queue of uncommitted itxs. This
 * provides an easy mechanism for determining which itxs were in the
 * queue prior to zil_commit() having been called, and which itxs were
 * added after zil_commit() was called.
 *
 * The commit itx is special; it doesn't have any on-disk representation.
 * When a commit itx is "committed" to an lwb, the waiter associated
 * with it is linked onto the lwb's list of waiters. Then, when that lwb
 * completes, each waiter on the lwb's list is marked done and signaled
 * -- allowing the thread waiting on the waiter to return from
 * zil_commit().
 *
 * It's important to point out a few critical factors that allow us
 * to make use of the commit itxs, commit waiters, per-lwb lists of
 * commit waiters, and zio completion callbacks like we're doing:
 *
 *   1. The list of waiters for each lwb is traversed, and each commit
 *      waiter is marked "done" and signaled, in the zio completion
 *      callback of the lwb's zio[*].
 *
 *      * Actually, the waiters are signaled in the zio completion
 *        callback of the root zio for the DKIOCFLUSHWRITECACHE commands
 *        that are sent to the vdevs upon completion of the lwb zio.
 *
 *   2. When the itxs are inserted into the ZIL's queue of uncommitted
 *      itxs, the order in which they are inserted is preserved[*]; as
 *      itxs are added to the queue, they are added to the tail of
 *      in-memory linked lists.
 *
 *      When committing the itxs to lwbs (to be written to disk), they
 *      are committed in the same order in which the itxs were added to
 *      the uncommitted queue's linked list(s); i.e. the linked list of
 *      itxs to commit is traversed from head to tail, and each itx is
 *      committed to an lwb in that order.
 *
 *      * To clarify:
 *
 *        - the order of "sync" itxs is preserved w.r.t. other
 *          "sync" itxs, regardless of the corresponding objects.
 *        - the order of "async" itxs is preserved w.r.t. other
 *          "async" itxs corresponding to the same object.
 *        - the order of "async" itxs is *not* preserved w.r.t. other
 *          "async" itxs corresponding to different objects.
 *        - the order of "sync" itxs w.r.t. "async" itxs (or vice
 *          versa) is *not* preserved, even for itxs that correspond
 *          to the same object.
 *
 *      For more details, see: zil_itx_assign(), zil_async_to_sync(),
 *      zil_get_commit_list(), and zil_process_commit_list().
 *
 *   3. The lwbs represent a linked list of blocks on disk. Thus, any
 *      lwb cannot be considered committed to stable storage, until its
 *      "previous" lwb is also committed to stable storage. This fact,
 *      coupled with the fact described above, means that itxs are
 *      committed in (roughly) the order in which they were generated.
 *      This is essential because itxs are dependent on prior itxs.
 *      Thus, we *must not* deem an itx as being committed to stable
 *      storage, until *all* prior itxs have also been committed to
 *      stable storage.
 *
 *      To enforce this ordering of lwb zio's, while still leveraging as
 *      much of the underlying storage performance as possible, we rely
 *      on two fundamental concepts:
 *
 *          1. The creation and issuance of lwb zio's is protected by
 *             the zilog's "zl_issuer_lock", which ensures only a single
 *             thread is creating and/or issuing lwb's at a time
 *          2. The "previous" lwb is a child of the "current" lwb
 *             (leveraging the zio parent-child dependency graph)
 *
 *      By relying on this parent-child zio relationship, we can have
 *      many lwb zio's concurrently issued to the underlying storage,
 *      but the order in which they complete will be the same order in
 *      which they were created.
 */
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
	/*
	 * We should never attempt to call zil_commit on a snapshot for
	 * a couple of reasons:
	 *
	 * 1. A snapshot may never be modified, thus it cannot have any
	 *    in-flight itxs that would have modified the dataset.
	 *
	 * 2. By design, when zil_commit() is called, a commit itx will
	 *    be assigned to this zilog; as a result, the zilog will be
	 *    dirtied. We must not dirty the zilog of a snapshot; there
	 *    are checks in the code that enforce this invariant, and
	 *    will cause a panic if it's not upheld.
	 */
	ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);

	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return;

	if (!spa_writeable(zilog->zl_spa)) {
		/*
		 * If the SPA is not writable, there should never be any
		 * pending itxs waiting to be committed to disk. If that
		 * weren't true, we'd skip writing those itxs out, and
		 * would break the semantics of zil_commit(); thus, we're
		 * verifying that truth before we return to the caller.
		 */
		ASSERT(list_is_empty(&zilog->zl_lwb_list));
		ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
		for (int i = 0; i < TXG_SIZE; i++)
			ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
		return;
	}

	/*
	 * If the ZIL is suspended, we don't want to dirty it by calling
	 * zil_commit_itx_assign() below, nor can we write out
	 * lwbs like would be done in zil_commit_write(). Thus, we
	 * simply rely on txg_wait_synced() to maintain the necessary
	 * semantics, and avoid calling those functions altogether.
	 */
	if (zilog->zl_suspend > 0) {
		txg_wait_synced(zilog->zl_dmu_pool, 0);
		return;
	}

	zil_commit_impl(zilog, foid);
}

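/*
 * Usage sketch (hypothetical caller, for illustration only): a file
 * system's fsync handler would typically log its itxs and then call
 *
 *	zil_commit(zilog, zp->z_id);
 *
 * to block until that file's itxs are on stable storage, while a
 * dataset-wide operation would pass a foid of 0 to commit everything
 * (per the "foid" semantics described above).
 */
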
void
zil_commit_impl(zilog_t *zilog, uint64_t foid)
{
	ZIL_STAT_BUMP(zilog, zil_commit_count);

	/*
	 * Move the "async" itxs for the specified foid to the "sync"
	 * queues, such that they will be later committed (or skipped)
	 * to an lwb when zil_process_commit_list() is called.
	 *
	 * Since these "async" itxs must be committed prior to this
	 * call to zil_commit returning, we must perform this operation
	 * before we call zil_commit_itx_assign().
	 */
	zil_async_to_sync(zilog, foid);

	/*
	 * We allocate a new "waiter" structure which will initially be
	 * linked to the commit itx using the itx's "itx_private" field.
	 * Since the commit itx doesn't represent any on-disk state,
	 * when it's committed to an lwb, rather than copying its
	 * lr_t into the lwb's buffer, the commit itx's "waiter" will be
	 * added to the lwb's list of waiters. Then, when the lwb is
	 * committed to stable storage, each waiter in the lwb's list of
	 * waiters will be marked "done", and signaled.
	 *
	 * We must create the waiter and assign the commit itx prior to
	 * calling zil_commit_writer(), or else our specific commit itx
	 * is not guaranteed to be committed to an lwb prior to calling
	 * zil_commit_waiter().
	 */
	zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
	zil_commit_itx_assign(zilog, zcw);

	uint64_t wtxg = zil_commit_writer(zilog, zcw);
	zil_commit_waiter(zilog, zcw);

	if (zcw->zcw_zio_error != 0) {
		/*
		 * If there was an error writing out the ZIL blocks that
		 * this thread is waiting on, then we fall back to
		 * relying on spa_sync() to write out the data this
		 * thread is waiting on. Obviously this has performance
		 * implications, but the expectation is for this to be
		 * an exceptional case, and shouldn't occur often.
		 */
		DTRACE_PROBE2(zil__commit__io__error,
		    zilog_t *, zilog, zil_commit_waiter_t *, zcw);
		txg_wait_synced(zilog->zl_dmu_pool, 0);
	} else if (wtxg != 0) {
		txg_wait_synced(zilog->zl_dmu_pool, wtxg);
	}

	zil_free_commit_waiter(zcw);
}

/*
 * Called in syncing context to free committed log blocks and update the
 * log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	zil_header_t *zh = zil_header_in_syncing_context(zilog);
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
	lwb_t *lwb;

	/*
	 * We don't zero out zl_destroy_txg, so make sure we don't try
	 * to destroy it twice.
	 */
	if (spa_sync_pass(spa) != 1)
		return;

	zil_lwb_flush_wait_all(zilog, txg);

	mutex_enter(&zilog->zl_lock);

	ASSERT(zilog->zl_stop_sync == 0);

	if (*replayed_seq != 0) {
		ASSERT(zh->zh_replay_seq < *replayed_seq);
		zh->zh_replay_seq = *replayed_seq;
		*replayed_seq = 0;
	}

	if (zilog->zl_destroy_txg == txg) {
		blkptr_t blk = zh->zh_log;
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

		ASSERT(list_is_empty(&zilog->zl_lwb_list));

		memset(zh, 0, sizeof (zil_header_t));
		memset(zilog->zl_replayed_seq, 0,
		    sizeof (zilog->zl_replayed_seq));

		if (zilog->zl_keep_first) {
			/*
			 * If this block was part of a log chain that couldn't
			 * be claimed because a device was missing during
			 * zil_claim(), but that device later returns,
			 * then this block could erroneously appear valid.
			 * To guard against this, assign a new GUID to the new
			 * log chain so it doesn't matter what blk points to.
			 */
			zil_init_log_chain(zilog, &blk);
			zh->zh_log = blk;
		} else {
			/*
			 * A destroyed ZIL chain can't contain any TX_SETSAXATTR
			 * records. So, deactivate the feature for this dataset.
			 * We activate it again when we start a new ZIL chain.
			 */
			if (dsl_dataset_feature_is_active(ds,
			    SPA_FEATURE_ZILSAXATTR))
				dsl_dataset_deactivate_feature(ds,
				    SPA_FEATURE_ZILSAXATTR, tx);
		}
	}

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		zh->zh_log = lwb->lwb_blk;
		if (lwb->lwb_state != LWB_STATE_FLUSH_DONE ||
		    lwb->lwb_alloc_txg > txg || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		if (!BP_IS_HOLE(&lwb->lwb_blk))
			zio_free(spa, txg, &lwb->lwb_blk);
		zil_free_lwb(zilog, lwb);

		/*
		 * If we don't have anything left in the lwb list then
		 * we've had an allocation failure and we need to zero
		 * out the zil_header blkptr so that we don't end
		 * up freeing the same block twice.
		 */
		if (list_is_empty(&zilog->zl_lwb_list))
			BP_ZERO(&zh->zh_log);
	}

	mutex_exit(&zilog->zl_lock);
}

static int
zil_lwb_cons(void *vbuf, void *unused, int kmflag)
{
	(void) unused, (void) kmflag;
	lwb_t *lwb = vbuf;
	list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
	list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
	    offsetof(zil_commit_waiter_t, zcw_node));
	avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
	    sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
	mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
	return (0);
}

static void
zil_lwb_dest(void *vbuf, void *unused)
{
	(void) unused;
	lwb_t *lwb = vbuf;
	mutex_destroy(&lwb->lwb_vdev_lock);
	avl_destroy(&lwb->lwb_vdev_tree);
	list_destroy(&lwb->lwb_waiters);
	list_destroy(&lwb->lwb_itxs);
}

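/*
 * zil_lwb_cons() and zil_lwb_dest() above implement the usual kmem
 * cache constructor/destructor idiom: the embedded lists, AVL tree,
 * and mutex are set up once when a slab object is created and torn
 * down only when the object is reclaimed, so the per-allocation path
 * (kmem_cache_alloc() callers) doesn't have to reinitialize them on
 * every allocation of an lwb.
 */
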
void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);

	zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
	    sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	zil_sums_init(&zil_sums_global);
	zil_kstats_global = kstat_create("zfs", 0, "zil", "misc",
	    KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (zil_kstats_global != NULL) {
		zil_kstats_global->ks_data = &zil_stats;
		zil_kstats_global->ks_update = zil_kstats_global_update;
		zil_kstats_global->ks_private = NULL;
		kstat_install(zil_kstats_global);
	}
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_zcw_cache);
	kmem_cache_destroy(zil_lwb_cache);

	if (zil_kstats_global != NULL) {
		kstat_delete(zil_kstats_global);
		zil_kstats_global = NULL;
	}

	zil_sums_fini(&zil_sums_global);
}

void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
	zilog->zl_sync = sync;
}

void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
	zilog->zl_logbias = logbias;
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);
	zilog->zl_destroy_txg = TXG_INITIAL - 1;
	zilog->zl_logbias = dmu_objset_logbias(os);
	zilog->zl_sync = dmu_objset_syncprop(os);
	zilog->zl_dirty_max_txg = 0;
	zilog->zl_last_lwb_opened = NULL;
	zilog->zl_last_lwb_latency = 0;
	zilog->zl_max_block_size = MIN(MAX(P2ALIGN_TYPED(zil_maxblocksize,
	    ZIL_MIN_BLKSZ, uint64_t), ZIL_MIN_BLKSZ),
	    spa_maxblocksize(dmu_objset_spa(os)));

	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zilog->zl_lwb_io_lock, NULL, MUTEX_DEFAULT, NULL);

	for (int i = 0; i < TXG_SIZE; i++) {
		mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
	cv_init(&zilog->zl_lwb_io_cv, NULL, CV_DEFAULT, NULL);

	for (int i = 0; i < ZIL_BURSTS; i++) {
		zilog->zl_prev_opt[i] = zilog->zl_max_block_size -
		    sizeof (zil_chain_t);
	}

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	int i;

	zilog->zl_stop_sync = 1;

	ASSERT0(zilog->zl_suspend);
	ASSERT0(zilog->zl_suspending);

	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	list_destroy(&zilog->zl_lwb_list);

	ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
	list_destroy(&zilog->zl_itx_commit_list);

	for (i = 0; i < TXG_SIZE; i++) {
		/*
		 * It's possible for an itx to be generated that doesn't dirty
		 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
		 * callback to remove the entry. We remove those here.
		 *
		 * Also free up the ziltest itxs.
		 */
		if (zilog->zl_itxg[i].itxg_itxs)
			zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
		mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
	}

	mutex_destroy(&zilog->zl_issuer_lock);
	mutex_destroy(&zilog->zl_lock);
	mutex_destroy(&zilog->zl_lwb_io_lock);

	cv_destroy(&zilog->zl_cv_suspend);
	cv_destroy(&zilog->zl_lwb_io_cv);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data, zil_sums_t *zil_sums)
{
	zilog_t *zilog = dmu_objset_zil(os);

	ASSERT3P(zilog->zl_get_data, ==, NULL);
	ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
	ASSERT(list_is_empty(&zilog->zl_lwb_list));

	zilog->zl_get_data = get_data;
	zilog->zl_sums = zil_sums;

	return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	lwb_t *lwb;
	uint64_t txg;

	if (!dmu_objset_is_snapshot(zilog->zl_os)) {
		zil_commit(zilog, 0);
	} else {
		ASSERT(list_is_empty(&zilog->zl_lwb_list));
		ASSERT0(zilog->zl_dirty_max_txg);
		ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
	}

	mutex_enter(&zilog->zl_lock);
	txg = zilog->zl_dirty_max_txg;
	lwb = list_tail(&zilog->zl_lwb_list);
	if (lwb != NULL) {
		txg = MAX(txg, lwb->lwb_alloc_txg);
		txg = MAX(txg, lwb->lwb_max_txg);
	}
	mutex_exit(&zilog->zl_lock);

	/*
	 * zl_lwb_max_issued_txg may be larger than lwb_max_txg. It depends
	 * on the time when the dmu_tx transaction is assigned in
	 * zil_lwb_write_issue().
	 */
	mutex_enter(&zilog->zl_lwb_io_lock);
	txg = MAX(zilog->zl_lwb_max_issued_txg, txg);
	mutex_exit(&zilog->zl_lwb_io_lock);

	/*
	 * We need to use txg_wait_synced() to wait until that txg is synced.
	 * zil_sync() will guarantee all lwbs up to that txg have been
	 * written out, flushed, and cleaned.
	 */
	if (txg != 0)
		txg_wait_synced(zilog->zl_dmu_pool, txg);

	if (zilog_is_dirty(zilog))
		zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog,
		    (u_longlong_t)txg);
	if (txg < spa_freeze_txg(zilog->zl_spa))
		VERIFY(!zilog_is_dirty(zilog));

	zilog->zl_get_data = NULL;

	/*
	 * We should have only one lwb left on the list; remove it now.
	 */
	mutex_enter(&zilog->zl_lock);
	lwb = list_remove_head(&zilog->zl_lwb_list);
	if (lwb != NULL) {
		ASSERT(list_is_empty(&zilog->zl_lwb_list));
		ASSERT3S(lwb->lwb_state, ==, LWB_STATE_NEW);
		zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		zil_free_lwb(zilog, lwb);
	}
	mutex_exit(&zilog->zl_lock);
}

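/*
 * Editor's note: an illustrative sketch, not part of the original
 * source.  It shows the zil_open()/zil_close() lifecycle; my_get_data
 * and my_zil_sums are hypothetical stand-ins for a consumer's
 * zil_get_data_t callback (used to fetch WR_INDIRECT write data) and
 * its kstat accumulator.
 */
#if 0
static zil_get_data_t my_get_data;
static zil_sums_t my_zil_sums;

static zilog_t *
my_fs_mount_zil(objset_t *os)
{
	/* Registers the data callback; balanced by zil_close() below. */
	return (zil_open(os, my_get_data, &my_zil_sums));
}

static void
my_fs_umount_zil(zilog_t *zilog)
{
	/* Commits outstanding itxs and removes the final lwb. */
	zil_close(zilog);
}
#endif
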
static const char *suspend_tag = "zil suspending";

/*
 * Suspend an intent log.  While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * On old version pools, we suspend the log briefly when taking a
 * snapshot so that it will have an empty intent log.
 *
 * Long holds are not really intended to be used the way we do here --
 * held for such a short time.  A concurrent caller of dsl_dataset_long_held()
 * could fail.  Therefore we take pains to only put a long hold if it is
 * actually necessary.  Fortunately, it will only be necessary if the
 * objset is currently mounted (or the ZVOL equivalent).  In that case it
 * will already have a long hold, so we are not really making things any worse.
 *
 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
 * zvol_state_t), and use their mechanism to prevent their hold from being
 * dropped (e.g. VFS_HOLD()).  However, that would be even more pain for
 * very little gain.
 *
 * If cookiep == NULL, this does both the suspend & resume.
 * Otherwise, it returns with the dataset "long held", and the cookie
 * should be passed into zil_resume().
 */
int
zil_suspend(const char *osname, void **cookiep)
{
	objset_t *os;
	zilog_t *zilog;
	const zil_header_t *zh;
	int error;

	error = dmu_objset_hold(osname, suspend_tag, &os);
	if (error != 0)
		return (error);
	zilog = dmu_objset_zil(os);

	mutex_enter(&zilog->zl_lock);
	zh = zilog->zl_header;

	if (zh->zh_flags & ZIL_REPLAY_NEEDED) {		/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		dmu_objset_rele(os, suspend_tag);
		return (SET_ERROR(EBUSY));
	}

	/*
	 * Don't put a long hold in the cases where we can avoid it.  This
	 * is when there is no cookie so we are doing a suspend & resume
	 * (i.e. called from zil_vdev_offline()), and there's nothing to do
	 * for the suspend because it's already suspended, or there's no ZIL.
	 */
	if (cookiep == NULL && !zilog->zl_suspending &&
	    (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
		mutex_exit(&zilog->zl_lock);
		dmu_objset_rele(os, suspend_tag);
		return (0);
	}

	dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
	dsl_pool_rele(dmu_objset_pool(os), suspend_tag);

	zilog->zl_suspend++;

	if (zilog->zl_suspend > 1) {
		/*
		 * Someone else is already suspending it.
		 * Just wait for them to finish.
		 */

		while (zilog->zl_suspending)
			cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
		mutex_exit(&zilog->zl_lock);

		if (cookiep == NULL)
			zil_resume(os);
		else
			*cookiep = os;
		return (0);
	}

	/*
	 * If there is no pointer to an on-disk block, this ZIL must not
	 * be active (e.g. filesystem not mounted), so there's nothing
	 * to clean up.
	 */
	if (BP_IS_HOLE(&zh->zh_log)) {
		ASSERT(cookiep != NULL);	/* fast path already handled */

		*cookiep = os;
		mutex_exit(&zilog->zl_lock);
		return (0);
	}

	/*
	 * The ZIL has work to do.  Ensure that the associated encryption
	 * key will remain mapped while we are committing the log by
	 * grabbing a reference to it.  If the key isn't loaded we have no
	 * choice but to return an error until the wrapping key is loaded.
	 */
	if (os->os_encrypted &&
	    dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) {
		zilog->zl_suspend--;
		mutex_exit(&zilog->zl_lock);
		dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
		dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
		return (SET_ERROR(EACCES));
	}

	zilog->zl_suspending = B_TRUE;
	mutex_exit(&zilog->zl_lock);

	/*
	 * We need to use zil_commit_impl to ensure we wait for all
	 * LWB_STATE_OPENED, _CLOSED and _READY lwbs to be committed
	 * to disk before proceeding.  If we used zil_commit instead, it
	 * would just call txg_wait_synced(), because zl_suspend is set.
	 * txg_wait_synced() doesn't wait for these lwbs to be
	 * LWB_STATE_FLUSH_DONE before returning.
	 */
	zil_commit_impl(zilog, 0);

	/*
	 * Now that we've ensured all lwbs are LWB_STATE_FLUSH_DONE, we
	 * use txg_wait_synced() to ensure the data from the zilog has
	 * migrated to the main pool before calling zil_destroy().
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zil_destroy(zilog, B_FALSE);

	mutex_enter(&zilog->zl_lock);
	zilog->zl_suspending = B_FALSE;
	cv_broadcast(&zilog->zl_cv_suspend);
	mutex_exit(&zilog->zl_lock);

	if (os->os_encrypted)
		dsl_dataset_remove_key_mapping(dmu_objset_ds(os));

	if (cookiep == NULL)
		zil_resume(os);
	else
		*cookiep = os;
	return (0);
}

void
zil_resume(void *cookie)
{
	objset_t *os = cookie;
	zilog_t *zilog = dmu_objset_zil(os);

	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
	dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
	dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}

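/*
 * Editor's note: an illustrative sketch, not part of the original
 * source.  It demonstrates the two zil_suspend() calling conventions
 * described in the comment above; my_osname is a hypothetical dataset
 * name.
 */
#if 0
static int
my_suspend_examples(const char *my_osname)
{
	void *cookie;
	int error;

	/* One-shot form: cookiep == NULL suspends and then resumes. */
	error = zil_suspend(my_osname, NULL);
	if (error != 0)
		return (error);

	/*
	 * Long form: the dataset stays suspended (and "long held")
	 * until the cookie is handed back to zil_resume().
	 */
	error = zil_suspend(my_osname, &cookie);
	if (error != 0)
		return (error);
	/* ... do work that requires an empty, suspended ZIL ... */
	zil_resume(cookie);

	return (0);
}
#endif
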
typedef struct zil_replay_arg {
	zil_replay_func_t *const *zr_replay;
	void		*zr_arg;
	boolean_t	zr_byteswap;
	char		*zr_lr;
} zil_replay_arg_t;

static int
zil_replay_error(zilog_t *zilog, const lr_t *lr, int error)
{
	char name[ZFS_MAX_DATASET_NAME_LEN];

	zilog->zl_replaying_seq--;	/* didn't actually replay this one */

	dmu_objset_name(zilog->zl_os, name);

	cmn_err(CE_WARN, "ZFS replay transaction error %d, "
	    "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
	    (u_longlong_t)lr->lrc_seq,
	    (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
	    (lr->lrc_txtype & TX_CI) ? "CI" : "");

	return (error);
}

static int
zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra,
    uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	const zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	int error = 0;

	zilog->zl_replaying_seq = lr->lrc_seq;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return (0);

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return (0);

	/* Strip case-insensitive bit, still present in log record */
	txtype &= ~TX_CI;

	if (txtype == 0 || txtype >= TX_MAX_TYPE)
		return (zil_replay_error(zilog, lr, EINVAL));

	/*
	 * If this record type can be logged out of order, the object
	 * (lr_foid) may no longer exist.  That's legitimate, not an error.
	 */
	if (TX_OOO(txtype)) {
		error = dmu_object_info(zilog->zl_os,
		    LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL);
		if (error == ENOENT || error == EEXIST)
			return (0);
	}

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	memcpy(zr->zr_lr, lr, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		error = zil_read_log_data(zilog, (lr_write_t *)lr,
		    zr->zr_lr + reclen);
		if (error != 0)
			return (zil_replay_error(zilog, lr, error));
	}

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different record types, and only the
	 * replay vectors know how to byteswap their records.  Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lr, reclen);

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header sequence number to reflect the fact that
	 * we did so.  At the end of each replay function the sequence number
	 * is updated if we are in replay mode.
	 */
	error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
	if (error != 0) {
		/*
		 * The DMU's dnode layer doesn't see removes until the txg
		 * commits, so a subsequent claim can spuriously fail with
		 * EEXIST.  So if we receive any error we try syncing out
		 * any removes then retry the transaction.  Note that we
		 * specify B_FALSE for byteswap now, so we don't do it twice.
		 */
		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
		error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
		if (error != 0)
			return (zil_replay_error(zilog, lr, error));
	}
	return (0);
}

static int
zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	(void) bp, (void) arg, (void) claim_txg;

	zilog->zl_replay_blks++;

	return (0);
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 * Return B_TRUE if there were any entries to replay.
 */
boolean_t
zil_replay(objset_t *os, void *arg,
    zil_replay_func_t *const replay_func[TX_MAX_TYPE])
{
	zilog_t *zilog = dmu_objset_zil(os);
	const zil_header_t *zh = zilog->zl_header;
	zil_replay_arg_t zr;

	if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
		return (zil_destroy(zilog, B_TRUE));
	}

	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
	zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_replay = B_TRUE;
	zilog->zl_replay_time = ddi_get_lbolt();
	ASSERT(zilog->zl_replay_blks == 0);
	(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
	    zh->zh_claim_txg, B_TRUE);
	vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog, B_FALSE);
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
	zilog->zl_replay = B_FALSE;

	return (B_TRUE);
}

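/*
 * Editor's note: an illustrative sketch, not part of the original
 * source.  It shows how a consumer supplies the per-txtype replay
 * vector that zil_replay() dispatches on.  my_replay_null and
 * my_replay_create are hypothetical zil_replay_func_t implementations;
 * in practice the ZPL fills this role with its own replay table.
 */
#if 0
static zil_replay_func_t *const my_replay_vector[TX_MAX_TYPE] = {
	my_replay_null,		/* 0: no such transaction type */
	my_replay_create,	/* TX_CREATE */
	/* ... one entry for every TX_* type up to TX_MAX_TYPE ... */
};

static void
my_fs_mount_replay(objset_t *os, void *my_fs_private)
{
	/* Replays a non-empty log, then destroys it; B_TRUE if replayed. */
	(void) zil_replay(os, my_fs_private, my_replay_vector);
}
#endif
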
boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
	if (zilog->zl_sync == ZFS_SYNC_DISABLED)
		return (B_TRUE);

	if (zilog->zl_replay) {
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
		    zilog->zl_replaying_seq;
		return (B_TRUE);
	}

	return (B_FALSE);
}

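/*
 * Editor's note: an illustrative sketch, not part of the original
 * source.  It shows the idiom used by log-record generators: when
 * zil_replaying() returns B_TRUE, the change being logged is itself
 * the product of replay (or logging is disabled), so no new itx may
 * be assigned.  my_log_write is a hypothetical logger.
 */
#if 0
static void
my_log_write(zilog_t *zilog, dmu_tx_t *tx, itx_t *itx)
{
	if (zil_replaying(zilog, tx)) {
		/* Drop the record; zil_replaying() dirtied the dataset. */
		zil_itx_destroy(itx);
		return;
	}
	zil_itx_assign(zilog, itx, tx);
}
#endif
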
int
zil_reset(const char *osname, void *arg)
{
	(void) arg;

	int error = zil_suspend(osname, NULL);
	/* EACCES means crypto key not loaded */
	if ((error == EACCES) || (error == EBUSY))
		return (SET_ERROR(error));
	if (error != 0)
		return (SET_ERROR(EEXIST));
	return (0);
}

EXPORT_SYMBOL(zil_alloc);
EXPORT_SYMBOL(zil_free);
EXPORT_SYMBOL(zil_open);
EXPORT_SYMBOL(zil_close);
EXPORT_SYMBOL(zil_replay);
EXPORT_SYMBOL(zil_replaying);
EXPORT_SYMBOL(zil_destroy);
EXPORT_SYMBOL(zil_destroy_sync);
EXPORT_SYMBOL(zil_itx_create);
EXPORT_SYMBOL(zil_itx_destroy);
EXPORT_SYMBOL(zil_itx_assign);
EXPORT_SYMBOL(zil_commit);
EXPORT_SYMBOL(zil_claim);
EXPORT_SYMBOL(zil_check_log_chain);
EXPORT_SYMBOL(zil_sync);
EXPORT_SYMBOL(zil_clean);
EXPORT_SYMBOL(zil_suspend);
EXPORT_SYMBOL(zil_resume);
EXPORT_SYMBOL(zil_lwb_add_block);
EXPORT_SYMBOL(zil_bp_tree_add);
EXPORT_SYMBOL(zil_set_sync);
EXPORT_SYMBOL(zil_set_logbias);
EXPORT_SYMBOL(zil_sums_init);
EXPORT_SYMBOL(zil_sums_fini);
EXPORT_SYMBOL(zil_kstat_values_update);

ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, UINT, ZMOD_RW,
	"ZIL block open timeout percentage");

ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
	"Disable intent logging replay");

ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW,
	"Disable ZIL cache flushes");

ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, U64, ZMOD_RW,
	"Limit in bytes of slog sync writes per commit");

ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW,
	"Limit in bytes of ZIL log block size");

ZFS_MODULE_PARAM(zfs_zil, zil_, maxcopied, UINT, ZMOD_RW,
	"Limit in bytes of WR_COPIED size");