/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2018 Datto Inc.
 */

/* Portions Copyright 2010 Robert Milkowski */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/wmsum.h>

/*
 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
 * calls that change the file system. Each itx has enough information to
 * be able to replay them after a system crash, power loss, or
 * equivalent failure mode. These are stored in memory until either:
 *
 *   1. they are committed to the pool by the DMU transaction group
 *      (txg), at which point they can be discarded; or
 *   2. they are committed to the on-disk ZIL for the dataset being
 *      modified (e.g. due to an fsync, O_DSYNC, or other synchronous
 *      requirement).
 *
 * In the event of a crash or power loss, the itxs contained by each
 * dataset's on-disk ZIL will be replayed when that dataset is first
 * instantiated (e.g. if the dataset is a normal filesystem, when it is
 * first mounted).
 *
 * As hinted at above, there is one ZIL per dataset (both the in-memory
 * representation, and the on-disk representation). The on-disk format
 * consists of 3 parts:
 *
 *	- a single, per-dataset, ZIL header; which points to a chain of
 *	- zero or more ZIL blocks; each of which contains
 *	- zero or more ZIL records
 *
 * A ZIL record holds the information necessary to replay a single
 * system call transaction. A ZIL block can hold many ZIL records, and
 * the blocks are chained together, similarly to a singly linked list.
 *
 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
 * block in the chain, and the ZIL header points to the first block in
 * the chain.
 *
 * Note, there is not a fixed place in the pool to hold these ZIL
 * blocks; they are dynamically allocated and freed as needed from the
 * blocks available on the pool, though they can be preferentially
 * allocated from a dedicated "log" vdev.
 */

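/*
 * Illustratively (a sketch only -- the position of the chaining
 * structure within a block differs between the ZILOG and ZILOG2
 * formats handled in zil_read_log_block() below):
 *
 *	zil_header_t           ZIL block             ZIL block
 *	+-----------+      +-->+-------------+   +-->+-------------+
 *	| zh_log ---+------+   | records ... |   |   | records ... |
 *	+-----------+          | zc_next_blk-+---+   | zc_next_blk-+--> ...
 *	                       +-------------+       +-------------+
 */
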
/*
 * This controls the amount of time that a ZIL block (lwb) will remain
 * "open" when it isn't "full", and it has a thread waiting for it to be
 * committed to stable storage. Please refer to the zil_commit_waiter()
 * function (and the comments within it) for more details.
 */
static uint_t zfs_commit_timeout_pct = 5;

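/*
 * In practice (see zil_commit_waiter()) this works out to the waiter
 * delaying roughly zl_last_lwb_latency * zfs_commit_timeout_pct / 100
 * before issuing the still-open lwb itself; i.e. the timeout is a
 * percentage of the recently observed lwb write latency.
 */
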
/*
 * Minimal time we care to delay commit waiting for more ZIL records.
 * At least the FreeBSD kernel can't sleep for less than 2us at its best.
 * So requests to sleep for less than 5us are a waste of CPU time with
 * a risk of significant log latency increase due to oversleep.
 */
static uint64_t zil_min_commit_timeout = 5000;

/*
 * See zil.h for more information about these fields.
 */
static zil_kstat_values_t zil_stats = {
	{ "zil_commit_count", KSTAT_DATA_UINT64 },
	{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_write", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_normal_alloc", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_write", KSTAT_DATA_UINT64 },
	{ "zil_itx_metaslab_slog_alloc", KSTAT_DATA_UINT64 },
};

static zil_sums_t zil_sums_global;
static kstat_t *zil_kstats_global;

/*
 * Disable intent logging replay. This global ZIL switch affects all pools.
 */
int zil_replay_disable = 0;

/*
 * Disable the DKIOCFLUSHWRITECACHE commands that are normally sent to
 * the disk(s) by the ZIL after an LWB write has completed. Setting this
 * will cause ZIL corruption on power loss if a volatile out-of-order
 * write cache is enabled.
 */
static int zil_nocacheflush = 0;

/*
 * Limit SLOG write size per commit executed with synchronous priority.
 * Any writes above that will be executed with lower (asynchronous) priority
 * to limit potential SLOG device abuse by a single active ZIL writer.
 */
static uint64_t zil_slog_bulk = 768 * 1024;

static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;

static void zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx);
static void zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb);
static itx_t *zil_itx_clone(itx_t *oitx);

static int
zil_bp_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
	const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;

	int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
	if (likely(cmp))
		return (cmp);

	return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}

static void
zil_bp_tree_init(zilog_t *zilog)
{
	avl_create(&zilog->zl_bp_tree, zil_bp_compare,
	    sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}

static void
zil_bp_tree_fini(zilog_t *zilog)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	zil_bp_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_bp_node_t));

	avl_destroy(t);
}

int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
	avl_tree_t *t = &zilog->zl_bp_tree;
	const dva_t *dva;
	zil_bp_node_t *zn;
	avl_index_t where;

	if (BP_IS_EMBEDDED(bp))
		return (0);

	dva = BP_IDENTITY(bp);

	if (avl_find(t, dva, &where) != NULL)
		return (SET_ERROR(EEXIST));

	zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

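/*
 * The EEXIST return above is what makes the claim/free callbacks below
 * idempotent: zil_claim_log_block() and zil_free_write(), for example,
 * call zil_bp_tree_add() first and simply skip any block whose DVA was
 * already visited earlier in the same zil_parse() pass.
 */
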
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
	return ((zil_header_t *)zilog->zl_header);
}

static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
	zio_cksum_t *zc = &bp->blk_cksum;

	(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0],
	    sizeof (zc->zc_word[ZIL_ZC_GUID_0]));
	(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1],
	    sizeof (zc->zc_word[ZIL_ZC_GUID_1]));
	zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
	zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}

static int
zil_kstats_global_update(kstat_t *ksp, int rw)
{
	zil_kstat_values_t *zs = ksp->ks_data;
	ASSERT3P(&zil_stats, ==, zs);

	if (rw == KSTAT_WRITE) {
		return (SET_ERROR(EACCES));
	}

	zil_kstat_values_update(zs, &zil_sums_global);

	return (0);
}

/*
 * Read a log block and make sure it's valid.
 */
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
    blkptr_t *nbp, char **begin, char **end, arc_buf_t **abuf)
{
	zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	zbookmark_phys_t zb;
	int error;

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	if (!decrypt)
		zio_flags |= ZIO_FLAG_RAW;

	SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
	    abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		zio_cksum_t cksum = bp->blk_cksum;

		/*
		 * Validate the checksummed log block.
		 *
		 * Sequence numbers should be... sequential. The checksum
		 * verifier for the next block should be bp's checksum plus 1.
		 *
		 * Also check the log chain linkage and size used.
		 */
		cksum.zc_word[ZIL_ZC_SEQ]++;

		uint64_t size = BP_GET_LSIZE(bp);
		if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
			zil_chain_t *zilc = (*abuf)->b_data;
			char *lr = (char *)(zilc + 1);

			if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    zilc->zc_nused < sizeof (*zilc) ||
			    zilc->zc_nused > size) {
				error = SET_ERROR(ECKSUM);
			} else {
				*begin = lr;
				*end = lr + zilc->zc_nused - sizeof (*zilc);
				*nbp = zilc->zc_next_blk;
			}
		} else {
			char *lr = (*abuf)->b_data;
			zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;

			if (memcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
			    sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
			    (zilc->zc_nused > (size - sizeof (*zilc)))) {
				error = SET_ERROR(ECKSUM);
			} else {
				*begin = lr;
				*end = lr + zilc->zc_nused;
				*nbp = zilc->zc_next_blk;
			}
		}
	}

	return (error);
}

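/*
 * Note the two block formats handled above: for ZILOG2 the zil_chain_t
 * leads the block and zc_nused includes the chain header itself, so the
 * record area is zc_nused - sizeof (zil_chain_t) bytes; for the older
 * ZILOG format the zil_chain_t trails the block and zc_nused counts
 * record bytes only.
 */
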
/*
 * Read a TX_WRITE log data block.
 */
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
	zio_flag_t zio_flags = ZIO_FLAG_CANFAIL;
	const blkptr_t *bp = &lr->lr_blkptr;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	arc_buf_t *abuf = NULL;
	zbookmark_phys_t zb;
	int error;

	if (BP_IS_HOLE(bp)) {
		if (wbuf != NULL)
			memset(wbuf, 0, MAX(BP_GET_LSIZE(bp), lr->lr_length));
		return (0);
	}

	if (zilog->zl_header->zh_claim_txg == 0)
		zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;

	/*
	 * If we are not using the resulting data, we are just checking that
	 * it hasn't been corrupted so we don't need to waste CPU time
	 * decompressing and decrypting it.
	 */
	if (wbuf == NULL)
		zio_flags |= ZIO_FLAG_RAW;

	ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

	error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);

	if (error == 0) {
		if (wbuf != NULL)
			memcpy(wbuf, abuf->b_data, arc_buf_size(abuf));
		arc_buf_destroy(abuf, &abuf);
	}

	return (error);
}

void
zil_sums_init(zil_sums_t *zs)
{
	wmsum_init(&zs->zil_commit_count, 0);
	wmsum_init(&zs->zil_commit_writer_count, 0);
	wmsum_init(&zs->zil_itx_count, 0);
	wmsum_init(&zs->zil_itx_indirect_count, 0);
	wmsum_init(&zs->zil_itx_indirect_bytes, 0);
	wmsum_init(&zs->zil_itx_copied_count, 0);
	wmsum_init(&zs->zil_itx_copied_bytes, 0);
	wmsum_init(&zs->zil_itx_needcopy_count, 0);
	wmsum_init(&zs->zil_itx_needcopy_bytes, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_count, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_bytes, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_write, 0);
	wmsum_init(&zs->zil_itx_metaslab_normal_alloc, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_count, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_bytes, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_write, 0);
	wmsum_init(&zs->zil_itx_metaslab_slog_alloc, 0);
}

void
zil_sums_fini(zil_sums_t *zs)
{
	wmsum_fini(&zs->zil_commit_count);
	wmsum_fini(&zs->zil_commit_writer_count);
	wmsum_fini(&zs->zil_itx_count);
	wmsum_fini(&zs->zil_itx_indirect_count);
	wmsum_fini(&zs->zil_itx_indirect_bytes);
	wmsum_fini(&zs->zil_itx_copied_count);
	wmsum_fini(&zs->zil_itx_copied_bytes);
	wmsum_fini(&zs->zil_itx_needcopy_count);
	wmsum_fini(&zs->zil_itx_needcopy_bytes);
	wmsum_fini(&zs->zil_itx_metaslab_normal_count);
	wmsum_fini(&zs->zil_itx_metaslab_normal_bytes);
	wmsum_fini(&zs->zil_itx_metaslab_normal_write);
	wmsum_fini(&zs->zil_itx_metaslab_normal_alloc);
	wmsum_fini(&zs->zil_itx_metaslab_slog_count);
	wmsum_fini(&zs->zil_itx_metaslab_slog_bytes);
	wmsum_fini(&zs->zil_itx_metaslab_slog_write);
	wmsum_fini(&zs->zil_itx_metaslab_slog_alloc);
}

void
zil_kstat_values_update(zil_kstat_values_t *zs, zil_sums_t *zil_sums)
{
	zs->zil_commit_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_commit_count);
	zs->zil_commit_writer_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_commit_writer_count);
	zs->zil_itx_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_count);
	zs->zil_itx_indirect_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_indirect_count);
	zs->zil_itx_indirect_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_indirect_bytes);
	zs->zil_itx_copied_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_copied_count);
	zs->zil_itx_copied_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_copied_bytes);
	zs->zil_itx_needcopy_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_needcopy_count);
	zs->zil_itx_needcopy_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_needcopy_bytes);
	zs->zil_itx_metaslab_normal_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_count);
	zs->zil_itx_metaslab_normal_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_bytes);
	zs->zil_itx_metaslab_normal_write.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_write);
	zs->zil_itx_metaslab_normal_alloc.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_normal_alloc);
	zs->zil_itx_metaslab_slog_count.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_count);
	zs->zil_itx_metaslab_slog_bytes.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_bytes);
	zs->zil_itx_metaslab_slog_write.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_write);
	zs->zil_itx_metaslab_slog_alloc.value.ui64 =
	    wmsum_value(&zil_sums->zil_itx_metaslab_slog_alloc);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
    boolean_t decrypt)
{
	const zil_header_t *zh = zilog->zl_header;
	boolean_t claimed = !!zh->zh_claim_txg;
	uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
	uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
	uint64_t max_blk_seq = 0;
	uint64_t max_lr_seq = 0;
	uint64_t blk_count = 0;
	uint64_t lr_count = 0;
	blkptr_t blk, next_blk = {{{{0}}}};
	int error = 0;

	/*
	 * Old logs didn't record the maximum zh_claim_lr_seq.
	 */
	if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
		claim_lr_seq = UINT64_MAX;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity. We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 * If the log has been claimed, stop if we encounter a sequence
	 * number greater than the highest claimed sequence number.
	 */
	zil_bp_tree_init(zilog);

	for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
		uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
		int reclen;
		char *lrp, *end;
		arc_buf_t *abuf = NULL;

		if (blk_seq > claim_blk_seq)
			break;

		error = parse_blk_func(zilog, &blk, arg, txg);
		if (error != 0)
			break;
		ASSERT3U(max_blk_seq, <, blk_seq);
		max_blk_seq = blk_seq;
		blk_count++;

		if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
			break;

		error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
		    &lrp, &end, &abuf);
		if (error != 0) {
			if (abuf)
				arc_buf_destroy(abuf, &abuf);
			if (claimed) {
				char name[ZFS_MAX_DATASET_NAME_LEN];

				dmu_objset_name(zilog->zl_os, name);

				cmn_err(CE_WARN, "ZFS read log block error %d, "
				    "dataset %s, seq 0x%llx\n", error, name,
				    (u_longlong_t)blk_seq);
			}
			break;
		}

		for (; lrp < end; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			if (lr->lrc_seq > claim_lr_seq) {
				arc_buf_destroy(abuf, &abuf);
				goto done;
			}

			error = parse_lr_func(zilog, lr, arg, txg);
			if (error != 0) {
				arc_buf_destroy(abuf, &abuf);
				goto done;
			}
			ASSERT3U(max_lr_seq, <, lr->lrc_seq);
			max_lr_seq = lr->lrc_seq;
			lr_count++;
		}
		arc_buf_destroy(abuf, &abuf);
	}
done:
	zilog->zl_parse_error = error;
	zilog->zl_parse_blk_seq = max_blk_seq;
	zilog->zl_parse_lr_seq = max_lr_seq;
	zilog->zl_parse_blk_count = blk_count;
	zilog->zl_parse_lr_count = lr_count;

	zil_bp_tree_fini(zilog);

	return (error);
}

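/*
 * For example, zil_destroy_sync() below walks the entire chain with the
 * "free" callbacks:
 *
 *	(void) zil_parse(zilog, zil_free_log_block,
 *	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg,
 *	    B_FALSE);
 */
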
static int
zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t first_txg)
{
	(void) tx;
	ASSERT(!BP_IS_HOLE(bp));

	/*
	 * As we call this function from the context of a rewind to a
	 * checkpoint, each ZIL block whose txg is later than the txg
	 * that we rewind to is invalid. Thus, we return -1 so
	 * zil_parse() doesn't attempt to read it.
	 */
	if (bp->blk_birth >= first_txg)
		return (-1);

	if (zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	zio_free(zilog->zl_spa, first_txg, bp);
	return (0);
}

static int
zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t first_txg)
{
	(void) zilog, (void) lrc, (void) tx, (void) first_txg;
	return (0);
}

static int
zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t first_txg)
{
	/*
	 * Claim log block if not already committed and not already claimed.
	 * If tx == NULL, just verify that the block is claimable.
	 */
	if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
	    zil_bp_tree_add(zilog, bp) != 0)
		return (0);

	return (zio_wait(zio_claim(NULL, zilog->zl_spa,
	    tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}

static int
zil_claim_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t first_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	int error;

	ASSERT(lrc->lrc_txtype == TX_WRITE);

	/*
	 * If the block is not readable, don't claim it. This can happen
	 * in normal operation when a log block is written to disk before
	 * some of the dmu_sync() blocks it points to. In this case, the
	 * transaction cannot have been committed to anyone (we would have
	 * waited for all writes to be stable first), so it is semantically
	 * correct to declare this the end of the log.
	 */
	if (lr->lr_blkptr.blk_birth >= first_txg) {
		error = zil_read_log_data(zilog, lr, NULL);
		if (error != 0)
			return (error);
	}

	return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}

static int
zil_claim_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
{
	const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
	const blkptr_t *bp;
	spa_t *spa;
	uint_t ii;

	ASSERT(lrc->lrc_txtype == TX_CLONE_RANGE);

	if (tx == NULL) {
		return (0);
	}

	/*
	 * XXX: Do we need to byteswap lr?
	 */

	spa = zilog->zl_spa;

	for (ii = 0; ii < lr->lr_nbps; ii++) {
		bp = &lr->lr_bps[ii];

		/*
		 * When the data is embedded into the BP there is no need to
		 * create a BRT entry as there is no data block. Just copy the
		 * BP as it contains the data.
		 */
		if (!BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
			brt_pending_add(spa, bp, tx);
		}
	}

	return (0);
}

static int
zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t first_txg)
{
	switch (lrc->lrc_txtype) {
	case TX_WRITE:
		return (zil_claim_write(zilog, lrc, tx, first_txg));
	case TX_CLONE_RANGE:
		return (zil_claim_clone_range(zilog, lrc, tx));
	default:
		return (0);
	}
}

static int
zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
    uint64_t claim_txg)
{
	(void) claim_txg;

	zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);

	return (0);
}

static int
zil_free_write(zilog_t *zilog, const lr_t *lrc, void *tx, uint64_t claim_txg)
{
	lr_write_t *lr = (lr_write_t *)lrc;
	blkptr_t *bp = &lr->lr_blkptr;

	ASSERT(lrc->lrc_txtype == TX_WRITE);

	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
	    !BP_IS_HOLE(bp)) {
		zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
	}

	return (0);
}

static int
zil_free_clone_range(zilog_t *zilog, const lr_t *lrc, void *tx)
{
	const lr_clone_range_t *lr = (const lr_clone_range_t *)lrc;
	const blkptr_t *bp;
	spa_t *spa;
	uint_t ii;

	ASSERT(lrc->lrc_txtype == TX_CLONE_RANGE);

	if (tx == NULL) {
		return (0);
	}

	spa = zilog->zl_spa;

	for (ii = 0; ii < lr->lr_nbps; ii++) {
		bp = &lr->lr_bps[ii];

		if (!BP_IS_HOLE(bp)) {
			zio_free(spa, dmu_tx_get_txg(tx), bp);
		}
	}

	return (0);
}

static int
zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
    uint64_t claim_txg)
{
	if (claim_txg == 0) {
		return (0);
	}

	switch (lrc->lrc_txtype) {
	case TX_WRITE:
		return (zil_free_write(zilog, lrc, tx, claim_txg));
	case TX_CLONE_RANGE:
		return (zil_free_clone_range(zilog, lrc, tx));
	default:
		return (0);
	}
}

static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
	const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
	const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;

	return (TREE_CMP(v1, v2));
}

static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg)
{
	lwb_t *lwb;

	lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
	lwb->lwb_zilog = zilog;
	lwb->lwb_blk = *bp;
	lwb->lwb_slog = slog;
	lwb->lwb_indirect = B_FALSE;
	if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
		lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
		lwb->lwb_sz = BP_GET_LSIZE(bp);
	} else {
		lwb->lwb_nused = lwb->lwb_nfilled = 0;
		lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
	}
	lwb->lwb_state = LWB_STATE_CLOSED;
	lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
	lwb->lwb_write_zio = NULL;
	lwb->lwb_root_zio = NULL;
	lwb->lwb_issued_timestamp = 0;
	lwb->lwb_issued_txg = 0;
	lwb->lwb_max_txg = txg;

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, lwb);
	mutex_exit(&zilog->zl_lock);

	return (lwb);
}

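/*
 * The size accounting above mirrors zil_read_log_block(): ZILOG2 blocks
 * keep the zil_chain_t at the front, so lwb_nused starts past it while
 * lwb_sz spans the whole block; older blocks keep it at the tail, so
 * lwb_sz excludes it instead.
 */
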
static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
	ASSERT(MUTEX_HELD(&zilog->zl_lock));
	ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
	VERIFY(list_is_empty(&lwb->lwb_waiters));
	VERIFY(list_is_empty(&lwb->lwb_itxs));
	ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
	ASSERT3P(lwb->lwb_write_zio, ==, NULL);
	ASSERT3P(lwb->lwb_root_zio, ==, NULL);
	ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
	ASSERT(lwb->lwb_state == LWB_STATE_CLOSED ||
	    lwb->lwb_state == LWB_STATE_FLUSH_DONE);

	/*
	 * Clear the zilog's field to indicate this lwb is no longer
	 * valid, and prevent use-after-free errors.
	 */
	if (zilog->zl_last_lwb_opened == lwb)
		zilog->zl_last_lwb_opened = NULL;

	kmem_cache_free(zil_lwb_cache, lwb);
}

/*
 * Called when we create in-memory log transactions so that we know
 * to clean up the itxs at the end of spa_sync().
 */
static void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	ASSERT(spa_writeable(zilog->zl_spa));

	if (ds->ds_is_snapshot)
		panic("dirtying snapshot!");

	if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, zilog);

		zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
	}
}

/*
 * Determine if the zil is dirty in the specified txg. Callers wanting to
 * ensure that the dirty state does not change must hold the itxg_lock for
 * the specified txg. Holding the lock will ensure that the zil cannot be
 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
 * state.
 */
static boolean_t __maybe_unused
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
		return (B_TRUE);
	return (B_FALSE);
}

/*
 * Determine if the zil is dirty. The zil is considered dirty if it has
 * any pending itx records that have not been cleaned by zil_clean().
 */
static boolean_t
zilog_is_dirty(zilog_t *zilog)
{
	dsl_pool_t *dp = zilog->zl_dmu_pool;

	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * It's called in zil_commit context (zil_process_commit_list()/zil_create()).
 * It activates the SPA_FEATURE_ZILSAXATTR feature, if it's enabled.
 * Check dsl_dataset_feature_is_active to avoid txg_wait_synced() on every
 * zil_commit.
 */
static void
zil_commit_activate_saxattr_feature(zilog_t *zilog)
{
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;

	if (spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
	    dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL &&
	    !dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
		dsl_dataset_dirty(ds, tx);
		txg = dmu_tx_get_txg(tx);

		mutex_enter(&ds->ds_lock);
		ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
		    (void *)B_TRUE;
		mutex_exit(&ds->ds_lock);
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}
}

/*
 * Create an on-disk intent log.
 */
static lwb_t *
zil_create(zilog_t *zilog)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb = NULL;
	uint64_t txg = 0;
	dmu_tx_t *tx = NULL;
	blkptr_t blk;
	int error = 0;
	boolean_t slog = FALSE;
	dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	ASSERT(zh->zh_claim_txg == 0);
	ASSERT(zh->zh_replay_seq == 0);

	blk = zh->zh_log;

	/*
	 * Allocate an initial log block if:
	 *    - there isn't one already
	 *    - the existing block is the wrong endianness
	 */
	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
		tx = dmu_tx_create(zilog->zl_os);
		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);

		if (!BP_IS_HOLE(&blk)) {
			zio_free(zilog->zl_spa, txg, &blk);
			BP_ZERO(&blk);
		}

		error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
		    ZIL_MIN_BLKSZ, &slog);
		if (error == 0)
			zil_init_log_chain(zilog, &blk);
	}

	/*
	 * Allocate a log write block (lwb) for the first log block.
	 */
	if (error == 0)
		lwb = zil_alloc_lwb(zilog, &blk, slog, txg);

	/*
	 * If we just allocated the first log block, commit our transaction
	 * and wait for zil_sync() to stuff the block pointer into zh_log.
	 * (zh is part of the MOS, so we cannot modify it in open context.)
	 */
	if (tx != NULL) {
		/*
		 * If the "zilsaxattr" feature is enabled on the zpool, then
		 * activate it now while we're creating the ZIL chain. We
		 * can't wait with this until we write the first xattr log
		 * record because we need to wait for the feature activation
		 * to sync out.
		 */
		if (spa_feature_is_enabled(zilog->zl_spa,
		    SPA_FEATURE_ZILSAXATTR) && dmu_objset_type(zilog->zl_os) !=
		    DMU_OST_ZVOL) {
			mutex_enter(&ds->ds_lock);
			ds->ds_feature_activation[SPA_FEATURE_ZILSAXATTR] =
			    (void *)B_TRUE;
			mutex_exit(&ds->ds_lock);
		}

		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	} else {
		/*
		 * This branch covers the case where we enable the feature on a
		 * zpool that has existing ZIL headers.
		 */
		zil_commit_activate_saxattr_feature(zilog);
	}
	IMPLY(spa_feature_is_enabled(zilog->zl_spa, SPA_FEATURE_ZILSAXATTR) &&
	    dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL,
	    dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR));

	ASSERT(error != 0 || memcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
	IMPLY(error == 0, lwb != NULL);

	return (lwb);
}

/*
 * In one tx, free all log blocks and clear the log header. If keep_first
 * is set, then we're replaying a log with no content. We want to keep the
 * first block, however, so that the first synchronous transaction doesn't
 * require a txg_wait_synced() in zil_create(). We don't need to
 * txg_wait_synced() here either when keep_first is set, because both
 * zil_create() and zil_destroy() will wait for any in-progress destroys
 * to complete.
 * Return B_TRUE if there were any entries to replay.
 */
boolean_t
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
	const zil_header_t *zh = zilog->zl_header;
	lwb_t *lwb;
	dmu_tx_t *tx;
	uint64_t txg;

	/*
	 * Wait for any previous destroy to complete.
	 */
	txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);

	zilog->zl_old_header = *zh;		/* debugging aid */

	if (BP_IS_HOLE(&zh->zh_log))
		return (B_FALSE);

	tx = dmu_tx_create(zilog->zl_os);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	mutex_enter(&zilog->zl_lock);

	ASSERT3U(zilog->zl_destroy_txg, <, txg);
	zilog->zl_destroy_txg = txg;
	zilog->zl_keep_first = keep_first;

	if (!list_is_empty(&zilog->zl_lwb_list)) {
		ASSERT(zh->zh_claim_txg == 0);
		VERIFY(!keep_first);
		while ((lwb = list_remove_head(&zilog->zl_lwb_list)) != NULL) {
			if (lwb->lwb_buf != NULL)
				zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
			zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
			zil_free_lwb(zilog, lwb);
		}
	} else if (!keep_first) {
		zil_destroy_sync(zilog, tx);
	}
	mutex_exit(&zilog->zl_lock);

	dmu_tx_commit(tx);

	return (B_TRUE);
}

void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	ASSERT(list_is_empty(&zilog->zl_lwb_list));
	(void) zil_parse(zilog, zil_free_log_block,
	    zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}

int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
	dmu_tx_t *tx = txarg;
	zilog_t *zilog;
	uint64_t first_txg;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_own_obj(dp, ds->ds_object,
	    DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
	if (error != 0) {
		/*
		 * EBUSY indicates that the objset is inconsistent, in which
		 * case it can not have a ZIL.
		 */
		if (error != EBUSY) {
			cmn_err(CE_WARN, "can't open objset for %llu, error %u",
			    (unsigned long long)ds->ds_object, error);
		}

		return (0);
	}

	zilog = dmu_objset_zil(os);
	zh = zil_header_in_syncing_context(zilog);
	ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
	first_txg = spa_min_claim_txg(zilog->zl_spa);

	/*
	 * If the spa_log_state is not set to be cleared, check whether
	 * the current uberblock is a checkpoint one and if the current
	 * header has been claimed before moving on.
	 *
	 * If the current uberblock is a checkpointed uberblock then
	 * one of the following scenarios took place:
	 *
	 * 1] We are currently rewinding to the checkpoint of the pool.
	 * 2] We crashed in the middle of a checkpoint rewind but we
	 *    did manage to write the checkpointed uberblock to the
	 *    vdev labels, so when we tried to import the pool again
	 *    the checkpointed uberblock was selected from the import
	 *    procedure.
	 *
	 * In both cases we want to zero out all the ZIL blocks, except
	 * the ones that have been claimed at the time of the checkpoint
	 * (their zh_claim_txg != 0). The reason is that these blocks
	 * may be corrupted since we may have reused their locations on
	 * disk after we took the checkpoint.
	 *
	 * We could try to set spa_log_state to SPA_LOG_CLEAR earlier
	 * when we first figure out whether the current uberblock is
	 * checkpointed or not. Unfortunately, that would discard all
	 * the logs, including the ones that are claimed, and we would
	 * leak space.
	 */
	if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
	    (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
	    zh->zh_claim_txg == 0)) {
		if (!BP_IS_HOLE(&zh->zh_log)) {
			(void) zil_parse(zilog, zil_clear_log_block,
			    zil_noop_log_record, tx, first_txg, B_FALSE);
		}
		BP_ZERO(&zh->zh_log);
		if (os->os_encrypted)
			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
		dmu_objset_disown(os, B_FALSE, FTAG);
		return (0);
	}

	/*
	 * If we are not rewinding and opening the pool normally, then
	 * the min_claim_txg should be equal to the first txg of the pool.
	 */
	ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));

	/*
	 * Claim all log blocks if we haven't already done so, and remember
	 * the highest claimed sequence number. This ensures that if we can
	 * read only part of the log now (e.g. due to a missing device),
	 * but we can read the entire log later, we will not try to replay
	 * or destroy beyond the last block we successfully claimed.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		(void) zil_parse(zilog, zil_claim_log_block,
		    zil_claim_log_record, tx, first_txg, B_FALSE);
		zh->zh_claim_txg = first_txg;
		zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
		zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
		if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
			zh->zh_flags |= ZIL_REPLAY_NEEDED;
		zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
		if (os->os_encrypted)
			os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}

	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_disown(os, B_FALSE, FTAG);
	return (0);
}

/*
 * Check the log by walking the log chain.
 * Checksum errors are ok as they indicate the end of the chain.
 * Any other error (no device or read failure) returns an error.
 */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
	(void) dp;
	zilog_t *zilog;
	objset_t *os;
	blkptr_t *bp;
	int error;

	ASSERT(tx == NULL);

	error = dmu_objset_from_ds(ds, &os);
	if (error != 0) {
		cmn_err(CE_WARN, "can't open objset %llu, error %d",
		    (unsigned long long)ds->ds_object, error);
		return (0);
	}

	zilog = dmu_objset_zil(os);
	bp = (blkptr_t *)&zilog->zl_header->zh_log;

	if (!BP_IS_HOLE(bp)) {
		vdev_t *vd;
		boolean_t valid = B_TRUE;

		/*
		 * Check the first block and determine if it's on a log device
		 * which may have been removed or faulted prior to loading this
		 * pool. If so, there's no point in checking the rest of the
		 * log as its content should have already been synced to the
		 * pool.
		 */
		spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
		vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
		if (vd->vdev_islog && vdev_is_dead(vd))
			valid = vdev_log_state_valid(vd);
		spa_config_exit(os->os_spa, SCL_STATE, FTAG);

		if (!valid)
			return (0);

		/*
		 * Check whether the current uberblock is checkpointed (e.g.
		 * we are rewinding) and whether the current header has been
		 * claimed or not. If it hasn't then skip verifying it. We
		 * do this because its ZIL blocks may be part of the pool's
		 * state before the rewind, which is no longer valid.
		 */
		zil_header_t *zh = zil_header_in_syncing_context(zilog);
		if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
		    zh->zh_claim_txg == 0)
			return (0);
	}

	/*
	 * Because tx == NULL, zil_claim_log_block() will not actually claim
	 * any blocks, but just determine whether it is possible to do so.
	 * In addition to checking the log chain, zil_claim_log_block()
	 * will invoke zio_claim() with a done func of spa_claim_notify(),
	 * which will update spa_max_claim_txg. See spa_load() for details.
	 */
	error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
	    zilog->zl_header->zh_claim_txg ? -1ULL :
	    spa_min_claim_txg(os->os_spa), B_FALSE);

	return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}

/*
 * When an itx is "skipped", this function is used to properly mark the
 * waiter as "done", and signal any thread(s) waiting on it. An itx can
 * be skipped (and not committed to an lwb) for a variety of reasons,
 * one of them being that the itx was committed via spa_sync(), prior to
 * it being committed to an lwb; this can happen if a thread calling
 * zil_commit() is racing with spa_sync().
 */
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT3B(zcw->zcw_done, ==, B_FALSE);
	zcw->zcw_done = B_TRUE;
	cv_broadcast(&zcw->zcw_cv);
	mutex_exit(&zcw->zcw_lock);
}

/*
 * This function is used when the given waiter is to be linked into an
 * lwb's "lwb_waiters" list; i.e. when the itx is committed to the lwb.
 * At this point, the waiter will no longer be referenced by the itx,
 * and instead, will be referenced by the lwb.
 */
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
	/*
	 * The lwb_waiters field of the lwb is protected by the zilog's
	 * zl_lock, thus it must be held when calling this function.
	 */
	ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock));

	mutex_enter(&zcw->zcw_lock);
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	ASSERT3P(lwb, !=, NULL);
	ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
	    lwb->lwb_state == LWB_STATE_ISSUED ||
	    lwb->lwb_state == LWB_STATE_WRITE_DONE);

	list_insert_tail(&lwb->lwb_waiters, zcw);
	zcw->zcw_lwb = lwb;
	mutex_exit(&zcw->zcw_lock);
}

/*
 * This function is used when zio_alloc_zil() fails to allocate a ZIL
 * block, and the given waiter must be linked to the "nolwb waiters"
 * list inside of zil_process_commit_list().
 */
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
	mutex_enter(&zcw->zcw_lock);
	ASSERT(!list_link_active(&zcw->zcw_node));
	ASSERT3P(zcw->zcw_lwb, ==, NULL);
	list_insert_tail(nolwb, zcw);
	mutex_exit(&zcw->zcw_lock);
}

void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
	avl_tree_t *t = &lwb->lwb_vdev_tree;
	avl_index_t where;
	zil_vdev_node_t *zv, zvsearch;
	int ndvas = BP_GET_NDVAS(bp);
	int i;

	if (zil_nocacheflush)
		return;

	mutex_enter(&lwb->lwb_vdev_lock);
	for (i = 0; i < ndvas; i++) {
		zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		if (avl_find(t, &zvsearch, &where) == NULL) {
			zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
			zv->zv_vdev = zvsearch.zv_vdev;
			avl_insert(t, zv, where);
		}
	}
	mutex_exit(&lwb->lwb_vdev_lock);
}

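/*
 * The tree built here is consumed after the lwb write completes: each
 * recorded vdev is sent a cache-flush command, and only once those
 * flushes finish is the lwb considered stable (see
 * zil_lwb_flush_vdevs_done() below), which is also why setting
 * zil_nocacheflush lets us skip this bookkeeping entirely.
 */
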
static void
zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
{
	avl_tree_t *src = &lwb->lwb_vdev_tree;
	avl_tree_t *dst = &nlwb->lwb_vdev_tree;
	void *cookie = NULL;
	zil_vdev_node_t *zv;

	ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
	ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
	ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);

	/*
	 * At this point in its lifetime, 'lwb' no longer needs the
	 * protection of lwb_vdev_lock for lwb_vdev_tree (it will only be
	 * modified while holding zilog->zl_lock), as its writes and those
	 * of its children have all completed. The younger 'nlwb' may be
	 * waiting on future writes to additional vdevs.
	 */
	mutex_enter(&nlwb->lwb_vdev_lock);
	/*
	 * Tear down the 'lwb' vdev tree, ensuring that entries which do not
	 * exist in 'nlwb' are moved to it, freeing any would-be duplicates.
	 */
	while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) {
		avl_index_t where;

		if (avl_find(dst, zv, &where) == NULL) {
			avl_insert(dst, zv, where);
		} else {
			kmem_free(zv, sizeof (*zv));
		}
	}
	mutex_exit(&nlwb->lwb_vdev_lock);
}

void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}

1370 | /* | |
900d09b2 | 1371 | * This function is a called after all vdevs associated with a given lwb |
1ce23dca | 1372 | * write have completed their DKIOCFLUSHWRITECACHE command; or as soon |
900d09b2 PS |
1373 | * as the lwb write completes, if "zil_nocacheflush" is set. Further, |
1374 | * all "previous" lwb's will have completed before this function is | |
1375 | * called; i.e. this function is called for all previous lwbs before | |
1376 | * it's called for "this" lwb (enforced via zio the dependencies | |
1377 | * configured in zil_lwb_set_zio_dependency()). | |
1ce23dca PS |
1378 | * |
1379 | * The intention is for this function to be called as soon as the | |
1380 | * contents of an lwb are considered "stable" on disk, and will survive | |
1381 | * any sudden loss of power. At this point, any threads waiting for the | |
1382 | * lwb to reach this state are signalled, and the "waiter" structures | |
1383 | * are marked "done". | |
1384 | */ | |
572e2857 | 1385 | static void |
1ce23dca | 1386 | zil_lwb_flush_vdevs_done(zio_t *zio) |
34dc7c2f | 1387 | { |
1ce23dca PS |
1388 | lwb_t *lwb = zio->io_private; |
1389 | zilog_t *zilog = lwb->lwb_zilog; | |
1ce23dca PS |
1390 | zil_commit_waiter_t *zcw; |
1391 | itx_t *itx; | |
152d6fda | 1392 | uint64_t txg; |
55b1842f | 1393 | list_t itxs, waiters; |
1ce23dca | 1394 | |
a604d324 GW |
1395 | spa_config_exit(zilog->zl_spa, SCL_STATE, lwb); |
1396 | ||
55b1842f AM |
1397 | list_create(&itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); |
1398 | list_create(&waiters, sizeof (zil_commit_waiter_t), | |
1399 | offsetof(zil_commit_waiter_t, zcw_node)); | |
1400 | ||
895e0313 | 1401 | hrtime_t t = gethrtime() - lwb->lwb_issued_timestamp; |
34dc7c2f | 1402 | |
1ce23dca | 1403 | mutex_enter(&zilog->zl_lock); |
34dc7c2f | 1404 | |
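	/*
	 * The update below keeps an exponential moving average of lwb
	 * write+flush latency, weighting the newest sample by 1/8: e.g.
	 * a running average of 8ms and a new sample of 16ms yield
	 * (8 * 7 + 16) / 8 = 9ms.
	 */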
895e0313 | 1405 | zilog->zl_last_lwb_latency = (zilog->zl_last_lwb_latency * 7 + t) / 8; |
34dc7c2f | 1406 | |
1ce23dca | 1407 | lwb->lwb_root_zio = NULL; |
900d09b2 | 1408 | |
1ce23dca PS |
1409 | if (zilog->zl_last_lwb_opened == lwb) { |
1410 | /* | |
1411 | * Remember the highest committed log sequence number | |
1412 | * for ztest. We only update this value when all the log | |
1413 | * writes succeeded, because ztest wants to ASSERT that | |
1414 | * it got the whole log chain. | |
1415 | */ | |
1416 | zilog->zl_commit_lr_seq = zilog->zl_lr_seq; | |
1417 | } | |
1418 | ||
55b1842f AM |
1419 | list_move_tail(&itxs, &lwb->lwb_itxs); |
1420 | list_move_tail(&waiters, &lwb->lwb_waiters); | |
a9d6b069 | 1421 | txg = lwb->lwb_issued_txg; |
55b1842f AM |
1422 | |
1423 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE); | |
1424 | lwb->lwb_state = LWB_STATE_FLUSH_DONE; | |
1425 | ||
f63811f0 AM |
1426 | mutex_exit(&zilog->zl_lock); |
1427 | ||
55b1842f | 1428 | while ((itx = list_remove_head(&itxs)) != NULL) |
1ce23dca | 1429 | zil_itx_destroy(itx); |
55b1842f | 1430 | list_destroy(&itxs); |
1ce23dca | 1431 | |
55b1842f | 1432 | while ((zcw = list_remove_head(&waiters)) != NULL) { |
1ce23dca PS |
1433 | mutex_enter(&zcw->zcw_lock); |
1434 | ||
1ce23dca | 1435 | zcw->zcw_lwb = NULL; |
f82f0279 AK |
1436 | /* |
1437 | * We expect any ZIO errors from child ZIOs to have been | |
1438 | * propagated "up" to this specific LWB's root ZIO, in | |
1439 | * order for this error handling to work correctly. This | |
1440 | * includes ZIO errors from either this LWB's write or | |
1441 | * flush, as well as any errors from other dependent LWBs | |
1442 | * (e.g. a root LWB ZIO that might be a child of this LWB). | |
1443 | * | |
1444 | * With that said, it's important to note that LWB flush | |
1445 | * errors are not propagated up to the LWB root ZIO. | |
1446 | * This is incorrect behavior, and results in VDEV flush | |
1447 | * errors not being handled correctly here. See the | |
1448 | * comment above the call to "zio_flush" for details. | |
1449 | */ | |
1ce23dca PS |
1450 | |
1451 | zcw->zcw_zio_error = zio->io_error; | |
1452 | ||
1453 | ASSERT3B(zcw->zcw_done, ==, B_FALSE); | |
1454 | zcw->zcw_done = B_TRUE; | |
1455 | cv_broadcast(&zcw->zcw_cv); | |
1456 | ||
1457 | mutex_exit(&zcw->zcw_lock); | |
34dc7c2f | 1458 | } |
55b1842f | 1459 | list_destroy(&waiters); |
34dc7c2f | 1460 | |
152d6fda | 1461 | mutex_enter(&zilog->zl_lwb_io_lock); |
152d6fda KJ |
1462 | ASSERT3U(zilog->zl_lwb_inflight[txg & TXG_MASK], >, 0); |
1463 | zilog->zl_lwb_inflight[txg & TXG_MASK]--; | |
1464 | if (zilog->zl_lwb_inflight[txg & TXG_MASK] == 0) | |
1465 | cv_broadcast(&zilog->zl_lwb_io_cv); | |
1466 | mutex_exit(&zilog->zl_lwb_io_lock); | |
1467 | } | |
1468 | ||
1469 | /* | |
1470 | * Wait for the completion of all issued writes/flushes for the given txg. |
1471 | * This guarantees zil_lwb_flush_vdevs_done() has been called and returned. |
1472 | */ | |
1473 | static void | |
1474 | zil_lwb_flush_wait_all(zilog_t *zilog, uint64_t txg) | |
1475 | { | |
1476 | ASSERT3U(txg, ==, spa_syncing_txg(zilog->zl_spa)); | |
1477 | ||
1478 | mutex_enter(&zilog->zl_lwb_io_lock); | |
1479 | while (zilog->zl_lwb_inflight[txg & TXG_MASK] > 0) | |
1480 | cv_wait(&zilog->zl_lwb_io_cv, &zilog->zl_lwb_io_lock); | |
1481 | mutex_exit(&zilog->zl_lwb_io_lock); | |
1482 | ||
1483 | #ifdef ZFS_DEBUG | |
1484 | mutex_enter(&zilog->zl_lock); | |
1485 | mutex_enter(&zilog->zl_lwb_io_lock); | |
1486 | lwb_t *lwb = list_head(&zilog->zl_lwb_list); | |
1487 | while (lwb != NULL && lwb->lwb_max_txg <= txg) { | |
1488 | if (lwb->lwb_issued_txg <= txg) { | |
1489 | ASSERT(lwb->lwb_state != LWB_STATE_ISSUED); | |
1490 | ASSERT(lwb->lwb_state != LWB_STATE_WRITE_DONE); | |
1491 | IMPLY(lwb->lwb_issued_txg > 0, | |
1492 | lwb->lwb_state == LWB_STATE_FLUSH_DONE); | |
1493 | } | |
7381ddf1 AM |
1494 | IMPLY(lwb->lwb_state == LWB_STATE_WRITE_DONE || |
1495 | lwb->lwb_state == LWB_STATE_FLUSH_DONE, | |
152d6fda KJ |
1496 | lwb->lwb_buf == NULL); |
1497 | lwb = list_next(&zilog->zl_lwb_list, lwb); | |
1498 | } | |
1499 | mutex_exit(&zilog->zl_lwb_io_lock); | |
1500 | mutex_exit(&zilog->zl_lock); | |
1501 | #endif | |
34dc7c2f BB |
1502 | } |
1503 | ||
1504 | /* | |
900d09b2 PS |
1505 | * This is called when an lwb's write zio completes. The callback's |
1506 | * purpose is to issue the DKIOCFLUSHWRITECACHE commands for the vdevs | |
1507 | * in the lwb's lwb_vdev_tree. The tree will contain the vdevs involved | |
1508 | * in writing out this specific lwb's data, and in the case that cache | |
1509 | * flushes have been deferred, vdevs involved in writing the data for | |
1510 | * previous lwbs. The writes corresponding to all the vdevs in the | |
1511 | * lwb_vdev_tree will have completed by the time this is called, due to | |
1512 | * the zio dependencies configured in zil_lwb_set_zio_dependency(), | |
1513 | * which takes deferred flushes into account. The lwb will be "done" | |
1514 | * once zil_lwb_flush_vdevs_done() is called, which occurs in the zio | |
1515 | * completion callback for the lwb's root zio. | |
34dc7c2f BB |
1516 | */ |
1517 | static void | |
1518 | zil_lwb_write_done(zio_t *zio) | |
1519 | { | |
1520 | lwb_t *lwb = zio->io_private; | |
1ce23dca | 1521 | spa_t *spa = zio->io_spa; |
34dc7c2f | 1522 | zilog_t *zilog = lwb->lwb_zilog; |
1ce23dca PS |
1523 | avl_tree_t *t = &lwb->lwb_vdev_tree; |
1524 | void *cookie = NULL; | |
1525 | zil_vdev_node_t *zv; | |
900d09b2 | 1526 | lwb_t *nlwb; |
1ce23dca | 1527 | |
a604d324 GW |
1528 | ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0); |
1529 | ||
b128c09f | 1530 | ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); |
b128c09f BB |
1531 | ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG); |
1532 | ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); | |
1533 | ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER); | |
1534 | ASSERT(!BP_IS_GANG(zio->io_bp)); | |
1535 | ASSERT(!BP_IS_HOLE(zio->io_bp)); | |
9b67f605 | 1536 | ASSERT(BP_GET_FILL(zio->io_bp) == 0); |
b128c09f | 1537 | |
e2af2acc | 1538 | abd_free(zio->io_abd); |
7381ddf1 AM |
1539 | zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); |
1540 | lwb->lwb_buf = NULL; | |
1ce23dca | 1541 | |
34dc7c2f | 1542 | mutex_enter(&zilog->zl_lock); |
900d09b2 PS |
1543 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED); |
1544 | lwb->lwb_state = LWB_STATE_WRITE_DONE; | |
1ce23dca | 1545 | lwb->lwb_write_zio = NULL; |
900d09b2 | 1546 | nlwb = list_next(&zilog->zl_lwb_list, lwb); |
428870ff | 1547 | mutex_exit(&zilog->zl_lock); |
9babb374 | 1548 | |
1ce23dca PS |
1549 | if (avl_numnodes(t) == 0) |
1550 | return; | |
1551 | ||
9babb374 | 1552 | /* |
1ce23dca PS |
1553 | * If there was an IO error, we're not going to call zio_flush() |
1554 | * on these vdevs, so we simply empty the tree and free the | |
1555 | * nodes. We avoid calling zio_flush() since there isn't any | |
1556 | * good reason for doing so, after the lwb block failed to be | |
1557 | * written out. | |
f82f0279 AK |
1558 | * |
1559 | * Additionally, we don't perform any further error handling at | |
1560 | * this point (e.g. setting "zcw_zio_error" appropriately), as | |
1561 | * we expect that to occur in "zil_lwb_flush_vdevs_done" (thus, | |
1562 | * we expect any error seen here, to have been propagated to | |
1563 | * that function). | |
9babb374 | 1564 | */ |
1ce23dca PS |
1565 | if (zio->io_error != 0) { |
1566 | while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) | |
1567 | kmem_free(zv, sizeof (*zv)); | |
1568 | return; | |
1569 | } | |
1570 | ||
900d09b2 PS |
1571 | /* |
1572 | * If this lwb does not have any threads waiting for it to | |
1573 | * complete, we want to defer issuing the DKIOCFLUSHWRITECACHE | |
1574 | * command to the vdevs written to by "this" lwb, and instead | |
1575 | * rely on the "next" lwb to handle the DKIOCFLUSHWRITECACHE | |
1576 | * command for those vdevs. Thus, we merge the vdev tree of | |
1577 | * "this" lwb with the vdev tree of the "next" lwb in the list, | |
1578 | * and assume the "next" lwb will handle flushing the vdevs (or | |
1579 | * deferring the flush(es) again). |
1580 | * | |
1581 | * This is a useful performance optimization, especially for | |
1582 | * workloads with lots of async write activity and few sync | |
1583 | * write and/or fsync activity, as it has the potential to | |
1584 | * coalesce multiple flush commands to a vdev into one. | |
1585 | */ | |
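	/*
	 * Illustration (hypothetical workload): if lwb1 and lwb2 both
	 * wrote to vdev A and neither has waiters, each defers its vdev
	 * tree into its successor; a later lwb3 that does have a waiter
	 * then issues a single flush of vdev A covering all three writes.
	 */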
895e0313 | 1586 | if (list_is_empty(&lwb->lwb_waiters) && nlwb != NULL) { |
900d09b2 PS |
1587 | zil_lwb_flush_defer(lwb, nlwb); |
1588 | ASSERT(avl_is_empty(&lwb->lwb_vdev_tree)); | |
1589 | return; | |
1590 | } | |
1591 | ||
1ce23dca PS |
1592 | while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) { |
1593 | vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev); | |
895e0313 | 1594 | if (vd != NULL && !vd->vdev_nowritecache) { |
f82f0279 AK |
1595 | /* |
1596 | * The "ZIO_FLAG_DONT_PROPAGATE" is currently | |
1597 | * always used within "zio_flush". This means, | |
1598 | * any errors when flushing the vdev(s), will | |
1599 | * (unfortunately) not be handled correctly, | |
1600 | * since these "zio_flush" errors will not be | |
1601 | * propagated up to "zil_lwb_flush_vdevs_done". | |
1602 | */ | |
1ce23dca | 1603 | zio_flush(lwb->lwb_root_zio, vd); |
f82f0279 | 1604 | } |
1ce23dca PS |
1605 | kmem_free(zv, sizeof (*zv)); |
1606 | } | |
34dc7c2f BB |
1607 | } |
1608 | ||
900d09b2 PS |
1609 | static void |
1610 | zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb) | |
1611 | { | |
1612 | lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened; | |
1613 | ||
1614 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); | |
1615 | ASSERT(MUTEX_HELD(&zilog->zl_lock)); | |
1616 | ||
1617 | /* | |
1618 | * The zilog's "zl_last_lwb_opened" field is used to build the | |
1619 | * lwb/zio dependency chain, which is used to preserve the | |
1620 | * ordering of lwb completions that is required by the semantics | |
1621 | * of the ZIL. Each new lwb zio becomes a parent of the | |
1622 | * "previous" lwb zio, such that the new lwb's zio cannot | |
1623 | * complete until the "previous" lwb's zio completes. | |
1624 | * | |
1625 | * This is required by the semantics of zil_commit(); the commit | |
1626 | * waiters attached to the lwbs will be woken in the lwb zio's | |
1627 | * completion callback, so this zio dependency graph ensures the | |
1628 | * waiters are woken in the correct order (the same order the | |
1629 | * lwbs were created). | |
1630 | */ | |
1631 | if (last_lwb_opened != NULL && | |
1632 | last_lwb_opened->lwb_state != LWB_STATE_FLUSH_DONE) { | |
1633 | ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED || | |
1634 | last_lwb_opened->lwb_state == LWB_STATE_ISSUED || | |
1635 | last_lwb_opened->lwb_state == LWB_STATE_WRITE_DONE); | |
1636 | ||
1637 | ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL); | |
1638 | zio_add_child(lwb->lwb_root_zio, | |
1639 | last_lwb_opened->lwb_root_zio); | |
1640 | ||
1641 | /* | |
1642 | * If the previous lwb's write hasn't already completed, | |
1643 | * we also want to order the completion of the lwb write | |
1644 | * zios (above, we only order the completion of the lwb | |
1645 | * root zios). This is required because of how we can | |
1646 | * defer the DKIOCFLUSHWRITECACHE commands for each lwb. | |
1647 | * | |
612c4930 | 1648 | * When the DKIOCFLUSHWRITECACHE commands are deferred, |
900d09b2 PS |
1649 | * the previous lwb will rely on this lwb to flush the |
1650 | * vdevs written to by that previous lwb. Thus, we need | |
1651 | * to ensure this lwb doesn't issue the flush until | |
1652 | * after the previous lwb's write completes. We ensure | |
1653 | * this ordering by setting the zio parent/child | |
1654 | * relationship here. | |
1655 | * | |
1656 | * Without this relationship on the lwb's write zio, | |
1657 | * it's possible for this lwb's write to complete prior | |
1658 | * to the previous lwb's write completing; and thus, the | |
1659 | * vdevs for the previous lwb would be flushed prior to | |
1660 | * that lwb's data being written to those vdevs (the | |
1661 | * vdevs are flushed in the lwb write zio's completion | |
1662 | * handler, zil_lwb_write_done()). | |
1663 | */ | |
1664 | if (last_lwb_opened->lwb_state != LWB_STATE_WRITE_DONE) { | |
1665 | ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED || | |
1666 | last_lwb_opened->lwb_state == LWB_STATE_ISSUED); | |
1667 | ||
1668 | ASSERT3P(last_lwb_opened->lwb_write_zio, !=, NULL); | |
1669 | zio_add_child(lwb->lwb_write_zio, | |
1670 | last_lwb_opened->lwb_write_zio); | |
1671 | } | |
1672 | } | |
1673 | } | |
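/*
 * Sketch of the graph built above for two in-flight lwbs, lwb1 (older)
 * and lwb2 (newer); an arrow points from parent to child, and a parent
 * zio cannot complete until its child has:
 *
 *	lwb2->lwb_root_zio  --> lwb1->lwb_root_zio
 *	lwb2->lwb_write_zio --> lwb1->lwb_write_zio  (only while lwb1's
 *	                                              write is in flight)
 *
 * Thus lwb2 can neither signal its waiters nor flush vdevs on behalf
 * of lwb1 before lwb1's data is actually on disk.
 */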
1674 | ||
1675 | ||
34dc7c2f | 1676 | /* |
1ce23dca PS |
1677 | * This function's purpose is to "open" an lwb such that it is ready to |
1678 | * accept new itxs being committed to it. To do this, the lwb's zio | |
1679 | * structures are created, and linked to the lwb. This function is | |
1680 | * idempotent; if the passed in lwb has already been opened, this | |
1681 | * function is essentially a no-op. | |
34dc7c2f BB |
1682 | */ |
1683 | static void | |
1ce23dca | 1684 | zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb) |
34dc7c2f | 1685 | { |
5dbd68a3 | 1686 | zbookmark_phys_t zb; |
1b7c1e5c | 1687 | zio_priority_t prio; |
34dc7c2f | 1688 | |
1b2b0aca | 1689 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca PS |
1690 | ASSERT3P(lwb, !=, NULL); |
1691 | EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED); | |
1692 | EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED); | |
1693 | ||
f63811f0 AM |
1694 | if (lwb->lwb_root_zio != NULL) |
1695 | return; | |
1696 | ||
1697 | lwb->lwb_root_zio = zio_root(zilog->zl_spa, | |
1698 | zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL); | |
1699 | ||
1700 | abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, | |
1701 | BP_GET_LSIZE(&lwb->lwb_blk)); | |
1702 | ||
1703 | if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk) | |
1704 | prio = ZIO_PRIORITY_SYNC_WRITE; | |
1705 | else | |
1706 | prio = ZIO_PRIORITY_ASYNC_WRITE; | |
1707 | ||
428870ff BB |
1708 | SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET], |
1709 | ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, | |
1710 | lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]); | |
34dc7c2f | 1711 | |
f63811f0 AM |
1712 | lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, zilog->zl_spa, 0, |
1713 | &lwb->lwb_blk, lwb_abd, BP_GET_LSIZE(&lwb->lwb_blk), | |
b22bab25 | 1714 | zil_lwb_write_done, lwb, prio, ZIO_FLAG_CANFAIL, &zb); |
1ce23dca | 1715 | |
b22bab25 | 1716 | mutex_enter(&zilog->zl_lock); |
f63811f0 | 1717 | lwb->lwb_state = LWB_STATE_OPENED; |
f63811f0 AM |
1718 | zil_lwb_set_zio_dependency(zilog, lwb); |
1719 | zilog->zl_last_lwb_opened = lwb; | |
920dd524 | 1720 | mutex_exit(&zilog->zl_lock); |
34dc7c2f BB |
1721 | } |
1722 | ||
428870ff BB |
1723 | /* |
1724 | * Define a limited set of intent log block sizes. | |
d3cc8b15 | 1725 | * |
428870ff BB |
1726 | * These must be a multiple of 4KB. Note only the amount used (again |
1727 | * aligned to 4KB) actually gets written. However, we can't always just | |
f1512ee6 | 1728 | * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted. |
428870ff | 1729 | */ |
18168da7 | 1730 | static const struct { |
f15d6a54 AM |
1731 | uint64_t limit; |
1732 | uint64_t blksz; | |
1733 | } zil_block_buckets[] = { | |
1734 | { 4096, 4096 }, /* non TX_WRITE */ | |
1735 | { 8192 + 4096, 8192 + 4096 }, /* database */ | |
1736 | { 32768 + 4096, 32768 + 4096 }, /* NFS writes */ | |
1737 | { 65536 + 4096, 65536 + 4096 }, /* 64KB writes */ | |
1738 | { 131072, 131072 }, /* < 128KB writes */ | |
1739 | { 131072 + 4096, 65536 + 4096 }, /* 128KB writes */ |
1740 | { UINT64_MAX, SPA_OLD_MAXBLOCKSIZE}, /* > 128KB writes */ | |
428870ff BB |
1741 | }; |
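/*
 * Illustration: a record of roughly 20KB needs 20480 +
 * sizeof (zil_chain_t) bytes, so the smallest bucket that covers it is
 * 32768 + 4096 and a 36KB block is allocated; only the used portion
 * (aligned to 4KB) is actually written.
 */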
1742 | ||
b8738257 MA |
1743 | /* |
1744 | * Maximum block size used by the ZIL. This is picked up when the ZIL is | |
1745 | * initialized. Otherwise this should not be used directly; see | |
1746 | * zl_max_block_size instead. | |
1747 | */ | |
fdc2d303 | 1748 | static uint_t zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE; |
b8738257 | 1749 | |
34dc7c2f | 1750 | /* |
f63811f0 AM |
1751 | * Close the log block for being issued and allocate the next one. |
1752 | * Has to be called under zl_issuer_lock to chain more lwbs. | |
34dc7c2f BB |
1753 | */ |
1754 | static lwb_t * | |
2cb992a9 | 1755 | zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, list_t *ilwbs) |
34dc7c2f | 1756 | { |
428870ff BB |
1757 | lwb_t *nlwb = NULL; |
1758 | zil_chain_t *zilc; | |
34dc7c2f | 1759 | spa_t *spa = zilog->zl_spa; |
428870ff BB |
1760 | blkptr_t *bp; |
1761 | dmu_tx_t *tx; | |
34dc7c2f | 1762 | uint64_t txg; |
f63811f0 | 1763 | uint64_t zil_blksz; |
428870ff | 1764 | int i, error; |
1b7c1e5c | 1765 | boolean_t slog; |
428870ff | 1766 | |
1b2b0aca | 1767 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca PS |
1768 | ASSERT3P(lwb->lwb_root_zio, !=, NULL); |
1769 | ASSERT3P(lwb->lwb_write_zio, !=, NULL); | |
1770 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); | |
1771 | ||
f63811f0 AM |
1772 | /* |
1773 | * If this lwb includes indirect writes, we have to commit before | |
1774 | * creating the transaction; otherwise we may end up in a deadlock. |
1775 | */ | |
1776 | if (lwb->lwb_indirect) { | |
1777 | for (itx_t *itx = list_head(&lwb->lwb_itxs); itx; | |
1778 | itx = list_next(&lwb->lwb_itxs, itx)) | |
1779 | zil_lwb_commit(zilog, lwb, itx); | |
1780 | lwb->lwb_nused = lwb->lwb_nfilled; | |
428870ff | 1781 | } |
34dc7c2f | 1782 | |
34dc7c2f BB |
1783 | /* |
1784 | * Allocate the next block and save its address in this block | |
1785 | * before writing it in order to establish the log chain. | |
34dc7c2f | 1786 | */ |
1ce23dca | 1787 | |
428870ff | 1788 | tx = dmu_tx_create(zilog->zl_os); |
e98b6117 AG |
1789 | |
1790 | /* | |
0735ecb3 PS |
1791 | * Since we are not going to create any new dirty data, and we |
1792 | * can even help with clearing the existing dirty data, we | |
1793 | * should not be subject to the dirty data based delays. We | |
1794 | * use TXG_NOTHROTTLE to bypass the delay mechanism. | |
e98b6117 | 1795 | */ |
0735ecb3 PS |
1796 | VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE)); |
1797 | ||
428870ff BB |
1798 | dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); |
1799 | txg = dmu_tx_get_txg(tx); | |
1800 | ||
152d6fda KJ |
1801 | mutex_enter(&zilog->zl_lwb_io_lock); |
1802 | lwb->lwb_issued_txg = txg; | |
1803 | zilog->zl_lwb_inflight[txg & TXG_MASK]++; | |
1804 | zilog->zl_lwb_max_issued_txg = MAX(txg, zilog->zl_lwb_max_issued_txg); | |
1805 | mutex_exit(&zilog->zl_lwb_io_lock); | |
34dc7c2f BB |
1806 | |
1807 | /* | |
428870ff BB |
1808 | * Log blocks are pre-allocated. Here we select the size of the next |
1809 | * block, based on size used in the last block. | |
1810 | * - first find the smallest bucket that will fit the block from a | |
1811 | * limited set of block sizes. This is because it's faster to write | |
1812 | * blocks allocated from the same metaslab as they are adjacent or | |
1813 | * close. | |
1814 | * - next find the maximum from the new suggested size and an array of | |
1815 | * previous sizes. This lessens a picket fence effect of wrongly | |
2fe61a7e | 1816 | * guessing the size if we have a stream of say 2k, 64k, 2k, 64k |
428870ff BB |
1817 | * requests. |
1818 | * | |
1819 | * Note we only write what is used, but we can't just allocate | |
1820 | * the maximum block size because we can exhaust the available | |
1821 | * pool log space. | |
34dc7c2f | 1822 | */ |
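	/*
	 * Worked example (hypothetical history): with zl_cur_used around
	 * 20KB, the smallest fitting bucket above is 32768 + 4096; if a
	 * recent entry in zl_prev_blks is 128KB, the MAX() pass below
	 * keeps 128KB, smoothing a 2k, 64k, 2k, 64k request pattern.
	 */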
428870ff | 1823 | zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t); |
f15d6a54 | 1824 | for (i = 0; zil_blksz > zil_block_buckets[i].limit; i++) |
428870ff | 1825 | continue; |
f15d6a54 | 1826 | zil_blksz = MIN(zil_block_buckets[i].blksz, zilog->zl_max_block_size); |
428870ff BB |
1827 | zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz; |
1828 | for (i = 0; i < ZIL_PREV_BLKS; i++) | |
1829 | zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]); | |
b6fbe61f AM |
1830 | DTRACE_PROBE3(zil__block__size, zilog_t *, zilog, |
1831 | uint64_t, zil_blksz, | |
1832 | uint64_t, zilog->zl_prev_blks[zilog->zl_prev_rotor]); | |
428870ff | 1833 | zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1); |
34dc7c2f | 1834 | |
f63811f0 AM |
1835 | if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) |
1836 | zilc = (zil_chain_t *)lwb->lwb_buf; | |
1837 | else | |
1838 | zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz); | |
1839 | bp = &zilc->zc_next_blk; | |
34dc7c2f | 1840 | BP_ZERO(bp); |
b5256303 | 1841 | error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, zil_blksz, &slog); |
13fe0198 | 1842 | if (error == 0) { |
428870ff BB |
1843 | ASSERT3U(bp->blk_birth, ==, txg); |
1844 | bp->blk_cksum = lwb->lwb_blk.blk_cksum; | |
1845 | bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++; | |
34dc7c2f BB |
1846 | |
1847 | /* | |
1ce23dca | 1848 | * Allocate a new log write block (lwb). |
34dc7c2f | 1849 | */ |
b22bab25 | 1850 | nlwb = zil_alloc_lwb(zilog, bp, slog, txg); |
34dc7c2f BB |
1851 | } |
1852 | ||
f63811f0 AM |
1853 | lwb->lwb_state = LWB_STATE_ISSUED; |
1854 | ||
1855 | dmu_tx_commit(tx); | |
1856 | ||
2cb992a9 AM |
1857 | /* |
1858 | * We need to acquire the config lock for the lwb to issue it later. | |
1859 | * However, if we already have a queue of closed parent lwbs already | |
1860 | * holding the config lock (but not yet issued), we can't block here | |
1861 | * waiting on the lock or we will deadlock. In that case we must | |
1862 | * first issue to parent IOs before waiting on the lock. | |
1863 | */ | |
1864 | if (ilwbs && !list_is_empty(ilwbs)) { | |
1865 | if (!spa_config_tryenter(spa, SCL_STATE, lwb, RW_READER)) { | |
1866 | lwb_t *tlwb; | |
1867 | while ((tlwb = list_remove_head(ilwbs)) != NULL) | |
1868 | zil_lwb_write_issue(zilog, tlwb); | |
1869 | spa_config_enter(spa, SCL_STATE, lwb, RW_READER); | |
1870 | } | |
1871 | } else { | |
1872 | spa_config_enter(spa, SCL_STATE, lwb, RW_READER); | |
1873 | } | |
1874 | ||
1875 | if (ilwbs) | |
1876 | list_insert_tail(ilwbs, lwb); | |
1877 | ||
f63811f0 AM |
1878 | /* |
1879 | * If there was an allocation failure then nlwb will be null which | |
1880 | * forces a txg_wait_synced(). | |
1881 | */ | |
1882 | return (nlwb); | |
1883 | } | |
1884 | ||
1885 | /* | |
1886 | * Finalize previously closed block and issue the write zio. | |
1887 | * Does not require locking. | |
1888 | */ | |
1889 | static void | |
1890 | zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb) | |
1891 | { | |
1892 | zil_chain_t *zilc; | |
1893 | int wsz; | |
1894 | ||
1895 | /* Actually fill the lwb with the data if not yet. */ | |
1896 | if (!lwb->lwb_indirect) { | |
1897 | for (itx_t *itx = list_head(&lwb->lwb_itxs); itx; | |
1898 | itx = list_next(&lwb->lwb_itxs, itx)) | |
1899 | zil_lwb_commit(zilog, lwb, itx); | |
1900 | lwb->lwb_nused = lwb->lwb_nfilled; | |
1901 | } | |
1902 | ||
428870ff BB |
1903 | if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) { |
1904 | /* For Slim ZIL, only write what is used. */ |
f63811f0 AM |
1905 | wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, int); |
1906 | ASSERT3S(wsz, <=, lwb->lwb_sz); | |
1ce23dca | 1907 | zio_shrink(lwb->lwb_write_zio, wsz); |
469019fb | 1908 | wsz = lwb->lwb_write_zio->io_size; |
34dc7c2f | 1909 | |
f63811f0 | 1910 | zilc = (zil_chain_t *)lwb->lwb_buf; |
428870ff BB |
1911 | } else { |
1912 | wsz = lwb->lwb_sz; | |
f63811f0 | 1913 | zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz); |
428870ff | 1914 | } |
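	/*
	 * In either layout the zil_chain_t carries the pointer to the
	 * next log block: at the head of the buffer for ZILOG2 ("Slim
	 * ZIL", which is what allows shrinking the write to wsz above),
	 * or as a trailer at the end of the block for the legacy format.
	 */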
428870ff BB |
1915 | zilc->zc_pad = 0; |
1916 | zilc->zc_nused = lwb->lwb_nused; | |
1917 | zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum; | |
34dc7c2f BB |
1918 | |
1919 | /* | |
428870ff | 1920 | * clear unused data for security |
34dc7c2f | 1921 | */ |
861166b0 | 1922 | memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused); |
34dc7c2f | 1923 | |
f63811f0 AM |
1924 | if (lwb->lwb_slog) { |
1925 | ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count); | |
1926 | ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes, | |
1927 | lwb->lwb_nused); | |
b6fbe61f AM |
1928 | ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_write, |
1929 | wsz); | |
1930 | ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_alloc, | |
1931 | BP_GET_LSIZE(&lwb->lwb_blk)); | |
f63811f0 AM |
1932 | } else { |
1933 | ZIL_STAT_BUMP(zilog, zil_itx_metaslab_normal_count); | |
1934 | ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_bytes, | |
1935 | lwb->lwb_nused); | |
b6fbe61f AM |
1936 | ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_write, |
1937 | wsz); | |
1938 | ZIL_STAT_INCR(zilog, zil_itx_metaslab_normal_alloc, | |
1939 | BP_GET_LSIZE(&lwb->lwb_blk)); | |
f63811f0 | 1940 | } |
2cb992a9 | 1941 | ASSERT(spa_config_held(zilog->zl_spa, SCL_STATE, RW_READER)); |
1ce23dca PS |
1942 | zil_lwb_add_block(lwb, &lwb->lwb_blk); |
1943 | lwb->lwb_issued_timestamp = gethrtime(); | |
1ce23dca PS |
1944 | zio_nowait(lwb->lwb_root_zio); |
1945 | zio_nowait(lwb->lwb_write_zio); | |
34dc7c2f BB |
1946 | } |
1947 | ||
b8738257 | 1948 | /* |
67a1b037 | 1949 | * Maximum amount of data that can be put into single log block. |
b8738257 MA |
1950 | */ |
1951 | uint64_t | |
67a1b037 | 1952 | zil_max_log_data(zilog_t *zilog, size_t hdrsize) |
b8738257 | 1953 | { |
67a1b037 | 1954 | return (zilog->zl_max_block_size - sizeof (zil_chain_t) - hdrsize); |
b8738257 MA |
1955 | } |
1956 | ||
1957 | /* | |
1958 | * Maximum amount of log space we agree to waste to reduce number of | |
1959 | * WR_NEED_COPY chunks to reduce zl_get_data() overhead (~12%). | |
1960 | */ | |
1961 | static inline uint64_t | |
1962 | zil_max_waste_space(zilog_t *zilog) | |
1963 | { | |
67a1b037 | 1964 | return (zil_max_log_data(zilog, sizeof (lr_write_t)) / 8); |
b8738257 MA |
1965 | } |
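/*
 * With a 128KB maximum block size this permits roughly 16KB of unused
 * tail space before a WR_NEED_COPY record is split across blocks
 * rather than started in the current one.
 */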
1966 | ||
1967 | /* | |
1968 | * Maximum amount of write data for WR_COPIED. For correctness, consumers | |
1969 | * must fall back to WR_NEED_COPY if we can't fit the entire record into one | |
1970 | * maximum sized log block, because each WR_COPIED record must fit in a | |
1971 | * single log block. For space efficiency, we want to fit two records into a | |
1972 | * max-sized log block. | |
1973 | */ | |
1974 | uint64_t | |
1975 | zil_max_copied_data(zilog_t *zilog) | |
1976 | { | |
1977 | return ((zilog->zl_max_block_size - sizeof (zil_chain_t)) / 2 - | |
1978 | sizeof (lr_write_t)); | |
1979 | } | |
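/*
 * Worked example: with the default 128KB zl_max_block_size the cap is
 * (131072 - sizeof (zil_chain_t)) / 2 - sizeof (lr_write_t), i.e. a
 * little under 64KB, so two maximal WR_COPIED records can still share
 * one max-sized log block.
 */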
1980 | ||
f63811f0 AM |
1981 | /* |
1982 | * Estimate space needed in the lwb for the itx. Allocate more lwbs or | |
1983 | * split the itx as needed, but don't touch the actual transaction data. | |
1984 | * Has to be called under zl_issuer_lock to call zil_lwb_write_close() | |
1985 | * to chain more lwbs. | |
1986 | */ | |
34dc7c2f | 1987 | static lwb_t * |
f63811f0 | 1988 | zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs) |
34dc7c2f | 1989 | { |
f63811f0 AM |
1990 | itx_t *citx; |
1991 | lr_t *lr, *clr; | |
1992 | lr_write_t *lrw; | |
1993 | uint64_t dlen, dnow, lwb_sp, reclen, max_log_data; | |
34dc7c2f | 1994 | |
1b2b0aca | 1995 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca PS |
1996 | ASSERT3P(lwb, !=, NULL); |
1997 | ASSERT3P(lwb->lwb_buf, !=, NULL); | |
1998 | ||
1999 | zil_lwb_write_open(zilog, lwb); | |
428870ff | 2000 | |
f63811f0 AM |
2001 | lr = &itx->itx_lr; |
2002 | lrw = (lr_write_t *)lr; | |
1ce23dca PS |
2003 | |
2004 | /* | |
2005 | * A commit itx doesn't represent any on-disk state; instead | |
2006 | * it's simply used as a place holder on the commit list, and | |
2007 | * provides a mechanism for attaching a "commit waiter" onto the | |
2008 | * correct lwb (such that the waiter can be signalled upon | |
2009 | * completion of that lwb). Thus, we don't process this itx's | |
2010 | * log record if it's a commit itx (these itx's don't have log | |
2011 | * records), and instead link the itx's waiter onto the lwb's | |
2012 | * list of waiters. | |
2013 | * | |
2014 | * For more details, see the comment above zil_commit(). | |
2015 | */ | |
f63811f0 | 2016 | if (lr->lrc_txtype == TX_COMMIT) { |
2fe61a7e | 2017 | mutex_enter(&zilog->zl_lock); |
1ce23dca PS |
2018 | zil_commit_waiter_link_lwb(itx->itx_private, lwb); |
2019 | itx->itx_private = NULL; | |
2fe61a7e | 2020 | mutex_exit(&zilog->zl_lock); |
f63811f0 | 2021 | list_insert_tail(&lwb->lwb_itxs, itx); |
1ce23dca PS |
2022 | return (lwb); |
2023 | } | |
34dc7c2f | 2024 | |
f63811f0 | 2025 | if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) { |
34dc7c2f | 2026 | dlen = P2ROUNDUP_TYPED( |
428870ff | 2027 | lrw->lr_length, sizeof (uint64_t), uint64_t); |
1b7c1e5c | 2028 | } else { |
f63811f0 | 2029 | dlen = 0; |
1b7c1e5c | 2030 | } |
f63811f0 | 2031 | reclen = lr->lrc_reclen; |
34dc7c2f BB |
2032 | zilog->zl_cur_used += (reclen + dlen); |
2033 | ||
1b7c1e5c | 2034 | cont: |
34dc7c2f BB |
2035 | /* |
2036 | * If this record won't fit in the current log block, start a new one. | |
1b7c1e5c | 2037 | * For WR_NEED_COPY optimize layout for minimal number of chunks. |
34dc7c2f | 2038 | */ |
1b7c1e5c | 2039 | lwb_sp = lwb->lwb_sz - lwb->lwb_nused; |
67a1b037 | 2040 | max_log_data = zil_max_log_data(zilog, sizeof (lr_write_t)); |
1b7c1e5c | 2041 | if (reclen > lwb_sp || (reclen + dlen > lwb_sp && |
b8738257 MA |
2042 | lwb_sp < zil_max_waste_space(zilog) && |
2043 | (dlen % max_log_data == 0 || | |
2044 | lwb_sp < reclen + dlen % max_log_data))) { | |
2cb992a9 | 2045 | lwb = zil_lwb_write_close(zilog, lwb, ilwbs); |
34dc7c2f BB |
2046 | if (lwb == NULL) |
2047 | return (NULL); | |
1ce23dca | 2048 | zil_lwb_write_open(zilog, lwb); |
1b7c1e5c | 2049 | lwb_sp = lwb->lwb_sz - lwb->lwb_nused; |
b8738257 MA |
2050 | |
2051 | /* | |
2052 | * There must be enough space in the new, empty log block to | |
2053 | * hold reclen. For WR_COPIED, we need to fit the whole | |
2054 | * record in one block, and reclen is the header size + the | |
2055 | * data size. For WR_NEED_COPY, we can create multiple | |
2056 | * records, splitting the data into multiple blocks, so we | |
2057 | * only need to fit one word of data per block; in this case | |
2058 | * reclen is just the header size (no data). | |
2059 | */ | |
1b7c1e5c | 2060 | ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp); |
34dc7c2f BB |
2061 | } |
2062 | ||
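	/*
	 * Illustration: a 100KB WR_NEED_COPY record may be split here
	 * into a chunk that fills the remainder of the current lwb plus
	 * a cloned itx that carries the rest into the next lwb; each
	 * chunk gets its own lr_write_t header with adjusted offset and
	 * length.
	 */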
1b7c1e5c | 2063 | dnow = MIN(dlen, lwb_sp - reclen); |
f63811f0 AM |
2064 | if (dlen > dnow) { |
2065 | ASSERT3U(lr->lrc_txtype, ==, TX_WRITE); | |
2066 | ASSERT3U(itx->itx_wr_state, ==, WR_NEED_COPY); | |
2067 | citx = zil_itx_clone(itx); | |
2068 | clr = &citx->itx_lr; | |
2069 | lr_write_t *clrw = (lr_write_t *)clr; | |
2070 | clrw->lr_length = dnow; | |
2071 | lrw->lr_offset += dnow; | |
2072 | lrw->lr_length -= dnow; | |
2073 | } else { | |
2074 | citx = itx; | |
2075 | clr = lr; | |
2076 | } | |
2077 | ||
2078 | /* | |
2079 | * We're actually making an entry, so update lrc_seq to be the | |
2080 | * log record sequence number. Note that this is generally not | |
2081 | * equal to the itx sequence number because not all transactions | |
2082 | * are synchronous, and sometimes spa_sync() gets there first. | |
2083 | */ | |
2084 | clr->lrc_seq = ++zilog->zl_lr_seq; | |
2085 | ||
2086 | lwb->lwb_nused += reclen + dnow; | |
2087 | ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz); | |
2088 | ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t))); | |
2089 | ||
2090 | zil_lwb_add_txg(lwb, lr->lrc_txg); | |
2091 | list_insert_tail(&lwb->lwb_itxs, citx); | |
2092 | ||
2093 | dlen -= dnow; | |
2094 | if (dlen > 0) { | |
2095 | zilog->zl_cur_used += reclen; | |
2096 | goto cont; | |
2097 | } | |
2098 | ||
2099 | /* | |
2100 | * We must actually issue all queued LWBs before we may have to |
2101 | * wait for a txg sync; otherwise we may end up in a deadlock. |
2102 | */ | |
2103 | if (lr->lrc_txtype == TX_WRITE) { | |
2104 | boolean_t frozen = lr->lrc_txg > spa_freeze_txg(zilog->zl_spa); | |
2105 | if (frozen || itx->itx_wr_state == WR_INDIRECT) { | |
2106 | lwb_t *tlwb; | |
2107 | while ((tlwb = list_remove_head(ilwbs)) != NULL) | |
2108 | zil_lwb_write_issue(zilog, tlwb); | |
2109 | } | |
2110 | if (itx->itx_wr_state == WR_INDIRECT) | |
2111 | lwb->lwb_indirect = B_TRUE; | |
2112 | if (frozen) | |
2113 | txg_wait_synced(zilog->zl_dmu_pool, lr->lrc_txg); | |
2114 | } | |
2115 | ||
2116 | return (lwb); | |
2117 | } | |
2118 | ||
2119 | /* | |
2120 | * Fill the actual transaction data into the lwb, following zil_lwb_assign(). | |
2121 | * Does not require locking. | |
2122 | */ | |
2123 | static void | |
2124 | zil_lwb_commit(zilog_t *zilog, lwb_t *lwb, itx_t *itx) | |
2125 | { | |
2126 | lr_t *lr, *lrb; | |
2127 | lr_write_t *lrw, *lrwb; | |
2128 | char *lr_buf; | |
2129 | uint64_t dlen, reclen; | |
2130 | ||
2131 | lr = &itx->itx_lr; | |
2132 | lrw = (lr_write_t *)lr; | |
2133 | ||
2134 | if (lr->lrc_txtype == TX_COMMIT) | |
2135 | return; | |
2136 | ||
2137 | if (lr->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) { | |
2138 | dlen = P2ROUNDUP_TYPED( | |
2139 | lrw->lr_length, sizeof (uint64_t), uint64_t); | |
2140 | } else { | |
2141 | dlen = 0; | |
2142 | } | |
2143 | reclen = lr->lrc_reclen; | |
2144 | ASSERT3U(reclen + dlen, <=, lwb->lwb_nused - lwb->lwb_nfilled); | |
2145 | ||
2146 | lr_buf = lwb->lwb_buf + lwb->lwb_nfilled; | |
2147 | memcpy(lr_buf, lr, reclen); | |
2148 | lrb = (lr_t *)lr_buf; /* Like lr, but inside lwb. */ | |
2149 | lrwb = (lr_write_t *)lrb; /* Like lrw, but inside lwb. */ | |
34dc7c2f | 2150 | |
fb087146 | 2151 | ZIL_STAT_BUMP(zilog, zil_itx_count); |
b6ad9671 | 2152 | |
34dc7c2f BB |
2153 | /* |
2154 | * If it's a write, fetch the data or get its blkptr as appropriate. | |
2155 | */ | |
f63811f0 | 2156 | if (lr->lrc_txtype == TX_WRITE) { |
b6ad9671 | 2157 | if (itx->itx_wr_state == WR_COPIED) { |
fb087146 AH |
2158 | ZIL_STAT_BUMP(zilog, zil_itx_copied_count); |
2159 | ZIL_STAT_INCR(zilog, zil_itx_copied_bytes, | |
2160 | lrw->lr_length); | |
b6ad9671 | 2161 | } else { |
34dc7c2f BB |
2162 | char *dbuf; |
2163 | int error; | |
2164 | ||
1b7c1e5c | 2165 | if (itx->itx_wr_state == WR_NEED_COPY) { |
428870ff | 2166 | dbuf = lr_buf + reclen; |
f63811f0 | 2167 | lrb->lrc_reclen += dlen; |
fb087146 AH |
2168 | ZIL_STAT_BUMP(zilog, zil_itx_needcopy_count); |
2169 | ZIL_STAT_INCR(zilog, zil_itx_needcopy_bytes, | |
f63811f0 | 2170 | dlen); |
34dc7c2f | 2171 | } else { |
1ce23dca | 2172 | ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT); |
34dc7c2f | 2173 | dbuf = NULL; |
fb087146 AH |
2174 | ZIL_STAT_BUMP(zilog, zil_itx_indirect_count); |
2175 | ZIL_STAT_INCR(zilog, zil_itx_indirect_bytes, | |
d1d7e268 | 2176 | lrw->lr_length); |
34dc7c2f | 2177 | } |
1ce23dca PS |
2178 | |
2179 | /* | |
2180 | * We pass in the "lwb_write_zio" rather than | |
2181 | * "lwb_root_zio" so that the "lwb_write_zio" | |
2182 | * becomes the parent of any zio's created by | |
2183 | * the "zl_get_data" callback. The vdevs are | |
2184 | * flushed after the "lwb_write_zio" completes, | |
2185 | * so we want to make sure that completion | |
2186 | * callback waits for these additional zio's, | |
2187 | * such that the vdevs used by those zio's will | |
2188 | * be included in the lwb's vdev tree, and those | |
2189 | * vdevs will be properly flushed. If we passed | |
2190 | * in "lwb_root_zio" here, then these additional | |
2191 | * vdevs may not be flushed; e.g. if these zio's | |
2192 | * completed after "lwb_write_zio" completed. | |
2193 | */ | |
2194 | error = zilog->zl_get_data(itx->itx_private, | |
296a4a36 CC |
2195 | itx->itx_gen, lrwb, dbuf, lwb, |
2196 | lwb->lwb_write_zio); | |
f63811f0 | 2197 | if (dbuf != NULL && error == 0) { |
3a185275 | 2198 | /* Zero any padding bytes in the last block. */ |
f63811f0 AM |
2199 | memset((char *)dbuf + lrwb->lr_length, 0, |
2200 | dlen - lrwb->lr_length); | |
2201 | } | |
1ce23dca | 2202 | |
3a7c3511 RY |
2203 | /* |
2204 | * Typically, the only return values we should see from | |
2205 | * ->zl_get_data() are 0, EIO, ENOENT, EEXIST or | |
2206 | * EALREADY. However, it is also possible to see other | |
2207 | * error values such as ENOSPC or EINVAL from | |
2208 | * dmu_read() -> dnode_hold() -> dnode_hold_impl() or | |
2209 | * ENXIO as well as a multitude of others from the | |
2210 | * block layer through dmu_buf_hold() -> dbuf_read() | |
2211 | * -> zio_wait(), as well as through dmu_read() -> | |
2212 | * dnode_hold() -> dnode_hold_impl() -> dbuf_read() -> | |
2213 | * zio_wait(). When these errors happen, we can assume | |
2214 | * that neither an immediate write nor an indirect | |
2215 | * write occurred, so we need to fall back to | |
2216 | * txg_wait_synced(). This is unusual, so we print to | |
2217 | * dmesg whenever one of these errors occurs. | |
2218 | */ | |
2219 | switch (error) { | |
2220 | case 0: | |
2221 | break; | |
2222 | default: | |
2223 | cmn_err(CE_WARN, "zil_lwb_commit() received " | |
2224 | "unexpected error %d from ->zl_get_data()" | |
2225 | ". Falling back to txg_wait_synced().", | |
2226 | error); | |
2227 | zfs_fallthrough; | |
2228 | case EIO: | |
f63811f0 AM |
2229 | if (lwb->lwb_indirect) { |
2230 | txg_wait_synced(zilog->zl_dmu_pool, | |
2231 | lr->lrc_txg); | |
2232 | } else { | |
2233 | lwb->lwb_write_zio->io_error = error; | |
2234 | } | |
3a7c3511 RY |
2235 | zfs_fallthrough; |
2236 | case ENOENT: | |
2237 | zfs_fallthrough; | |
2238 | case EEXIST: | |
2239 | zfs_fallthrough; | |
2240 | case EALREADY: | |
f63811f0 | 2241 | return; |
34dc7c2f BB |
2242 | } |
2243 | } | |
2244 | } | |
2245 | ||
f63811f0 AM |
2246 | lwb->lwb_nfilled += reclen + dlen; |
2247 | ASSERT3S(lwb->lwb_nfilled, <=, lwb->lwb_nused); | |
2248 | ASSERT0(P2PHASE(lwb->lwb_nfilled, sizeof (uint64_t))); | |
34dc7c2f BB |
2249 | } |
2250 | ||
2251 | itx_t * | |
58714c28 | 2252 | zil_itx_create(uint64_t txtype, size_t olrsize) |
34dc7c2f | 2253 | { |
58714c28 | 2254 | size_t itxsize, lrsize; |
34dc7c2f BB |
2255 | itx_t *itx; |
2256 | ||
58714c28 | 2257 | lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t); |
72841b9f | 2258 | itxsize = offsetof(itx_t, itx_lr) + lrsize; |
34dc7c2f | 2259 | |
72841b9f | 2260 | itx = zio_data_buf_alloc(itxsize); |
34dc7c2f BB |
2261 | itx->itx_lr.lrc_txtype = txtype; |
2262 | itx->itx_lr.lrc_reclen = lrsize; | |
34dc7c2f | 2263 | itx->itx_lr.lrc_seq = 0; /* defensive */ |
861166b0 | 2264 | memset((char *)&itx->itx_lr + olrsize, 0, lrsize - olrsize); |
572e2857 | 2265 | itx->itx_sync = B_TRUE; /* default is synchronous */ |
119a394a ED |
2266 | itx->itx_callback = NULL; |
2267 | itx->itx_callback_data = NULL; | |
72841b9f | 2268 | itx->itx_size = itxsize; |
34dc7c2f BB |
2269 | |
2270 | return (itx); | |
2271 | } | |
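/*
 * Sketch of a typical consumer (simplified from the zfs_log_* call
 * paths; transaction setup and error handling omitted):
 *
 *	itx_t *itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t));
 *	... fill in the lr_write_t that follows itx->itx_lr ...
 *	zil_itx_assign(zilog, itx, tx);
 */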
2272 | ||
f63811f0 AM |
2273 | static itx_t * |
2274 | zil_itx_clone(itx_t *oitx) | |
2275 | { | |
2276 | itx_t *itx = zio_data_buf_alloc(oitx->itx_size); | |
2277 | memcpy(itx, oitx, oitx->itx_size); | |
2278 | itx->itx_callback = NULL; | |
2279 | itx->itx_callback_data = NULL; | |
2280 | return (itx); | |
2281 | } | |
2282 | ||
428870ff BB |
2283 | void |
2284 | zil_itx_destroy(itx_t *itx) | |
2285 | { | |
1ce23dca PS |
2286 | IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL); |
2287 | IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); | |
2288 | ||
2289 | if (itx->itx_callback != NULL) | |
2290 | itx->itx_callback(itx->itx_callback_data); | |
2291 | ||
72841b9f | 2292 | zio_data_buf_free(itx, itx->itx_size); |
428870ff BB |
2293 | } |
2294 | ||
572e2857 BB |
2295 | /* |
2296 | * Free up the sync and async itxs. The itxs_t has already been detached | |
2297 | * so no locks are needed. | |
2298 | */ | |
2299 | static void | |
23c13c7e | 2300 | zil_itxg_clean(void *arg) |
34dc7c2f | 2301 | { |
572e2857 BB |
2302 | itx_t *itx; |
2303 | list_t *list; | |
2304 | avl_tree_t *t; | |
2305 | void *cookie; | |
23c13c7e | 2306 | itxs_t *itxs = arg; |
572e2857 BB |
2307 | itx_async_node_t *ian; |
2308 | ||
2309 | list = &itxs->i_sync_list; | |
895e0313 | 2310 | while ((itx = list_remove_head(list)) != NULL) { |
1ce23dca PS |
2311 | /* |
2312 | * In the general case, commit itxs will not be found | |
2313 | * here, as they'll be committed to an lwb via | |
f63811f0 | 2314 | * zil_lwb_assign(), and free'd in that function. Having |
1ce23dca PS |
2315 | * said that, it is still possible for commit itxs to be |
2316 | * found here, due to the following race: | |
2317 | * | |
2318 | * - a thread calls zil_commit() which assigns the | |
2319 | * commit itx to a per-txg i_sync_list | |
2320 | * - zil_itxg_clean() is called (e.g. via spa_sync()) | |
2321 | * while the waiter is still on the i_sync_list | |
2322 | * | |
2323 | * There's nothing to prevent syncing the txg while the | |
2324 | * waiter is on the i_sync_list. This normally doesn't | |
2325 | * happen because spa_sync() is slower than zil_commit(), | |
2326 | * but if zil_commit() calls txg_wait_synced() (e.g. | |
2327 | * because zil_create() or zil_commit_writer_stall() is | |
2328 | * called) we will hit this case. | |
2329 | */ | |
2330 | if (itx->itx_lr.lrc_txtype == TX_COMMIT) | |
2331 | zil_commit_waiter_skip(itx->itx_private); | |
2332 | ||
19ea3d25 | 2333 | zil_itx_destroy(itx); |
572e2857 | 2334 | } |
34dc7c2f | 2335 | |
572e2857 BB |
2336 | cookie = NULL; |
2337 | t = &itxs->i_async_tree; | |
2338 | while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { | |
2339 | list = &ian->ia_list; | |
895e0313 | 2340 | while ((itx = list_remove_head(list)) != NULL) { |
1ce23dca PS |
2341 | /* commit itxs should never be on the async lists. */ |
2342 | ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); | |
19ea3d25 | 2343 | zil_itx_destroy(itx); |
572e2857 BB |
2344 | } |
2345 | list_destroy(list); | |
2346 | kmem_free(ian, sizeof (itx_async_node_t)); | |
2347 | } | |
2348 | avl_destroy(t); | |
34dc7c2f | 2349 | |
572e2857 BB |
2350 | kmem_free(itxs, sizeof (itxs_t)); |
2351 | } | |
34dc7c2f | 2352 | |
572e2857 BB |
2353 | static int |
2354 | zil_aitx_compare(const void *x1, const void *x2) | |
2355 | { | |
2356 | const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid; | |
2357 | const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid; | |
2358 | ||
ca577779 | 2359 | return (TREE_CMP(o1, o2)); |
34dc7c2f BB |
2360 | } |
2361 | ||
2362 | /* | |
572e2857 | 2363 | * Remove all async itx with the given oid. |
34dc7c2f | 2364 | */ |
8e556c5e | 2365 | void |
572e2857 | 2366 | zil_remove_async(zilog_t *zilog, uint64_t oid) |
34dc7c2f | 2367 | { |
572e2857 BB |
2368 | uint64_t otxg, txg; |
2369 | itx_async_node_t *ian; | |
2370 | avl_tree_t *t; | |
2371 | avl_index_t where; | |
34dc7c2f BB |
2372 | list_t clean_list; |
2373 | itx_t *itx; | |
2374 | ||
572e2857 | 2375 | ASSERT(oid != 0); |
34dc7c2f BB |
2376 | list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node)); |
2377 | ||
572e2857 BB |
2378 | if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ |
2379 | otxg = ZILTEST_TXG; | |
2380 | else | |
2381 | otxg = spa_last_synced_txg(zilog->zl_spa) + 1; | |
34dc7c2f | 2382 | |
572e2857 BB |
2383 | for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { |
2384 | itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; | |
2385 | ||
2386 | mutex_enter(&itxg->itxg_lock); | |
2387 | if (itxg->itxg_txg != txg) { | |
2388 | mutex_exit(&itxg->itxg_lock); | |
2389 | continue; | |
2390 | } | |
34dc7c2f | 2391 | |
572e2857 BB |
2392 | /* |
2393 | * Locate the object node and append its list. | |
2394 | */ | |
2395 | t = &itxg->itxg_itxs->i_async_tree; | |
2396 | ian = avl_find(t, &oid, &where); | |
2397 | if (ian != NULL) | |
2398 | list_move_tail(&clean_list, &ian->ia_list); | |
2399 | mutex_exit(&itxg->itxg_lock); | |
2400 | } | |
895e0313 | 2401 | while ((itx = list_remove_head(&clean_list)) != NULL) { |
1ce23dca PS |
2402 | /* commit itxs should never be on the async lists. */ |
2403 | ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT); | |
19ea3d25 | 2404 | zil_itx_destroy(itx); |
34dc7c2f BB |
2405 | } |
2406 | list_destroy(&clean_list); | |
2407 | } | |
2408 | ||
572e2857 BB |
2409 | void |
2410 | zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) | |
2411 | { | |
2412 | uint64_t txg; | |
2413 | itxg_t *itxg; | |
2414 | itxs_t *itxs, *clean = NULL; | |
2415 | ||
572e2857 BB |
2416 | /* |
2417 | * Ensure the data of a renamed file is committed before the rename. | |
2418 | */ | |
2419 | if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME) | |
2420 | zil_async_to_sync(zilog, itx->itx_oid); | |
2421 | ||
29809a6c | 2422 | if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) |
572e2857 BB |
2423 | txg = ZILTEST_TXG; |
2424 | else | |
2425 | txg = dmu_tx_get_txg(tx); | |
2426 | ||
2427 | itxg = &zilog->zl_itxg[txg & TXG_MASK]; | |
2428 | mutex_enter(&itxg->itxg_lock); | |
2429 | itxs = itxg->itxg_itxs; | |
2430 | if (itxg->itxg_txg != txg) { | |
2431 | if (itxs != NULL) { | |
2432 | /* | |
2433 | * The zil_clean callback hasn't got around to cleaning | |
2434 | * this itxg. Save the itxs for release below. | |
2435 | * This should be rare. | |
2436 | */ | |
55922e73 | 2437 | zfs_dbgmsg("zil_itx_assign: missed itx cleanup for " |
8e739b2c | 2438 | "txg %llu", (u_longlong_t)itxg->itxg_txg); |
572e2857 BB |
2439 | clean = itxg->itxg_itxs; |
2440 | } | |
572e2857 | 2441 | itxg->itxg_txg = txg; |
d1d7e268 | 2442 | itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), |
79c76d5b | 2443 | KM_SLEEP); |
572e2857 BB |
2444 | |
2445 | list_create(&itxs->i_sync_list, sizeof (itx_t), | |
2446 | offsetof(itx_t, itx_node)); | |
2447 | avl_create(&itxs->i_async_tree, zil_aitx_compare, | |
2448 | sizeof (itx_async_node_t), | |
2449 | offsetof(itx_async_node_t, ia_node)); | |
2450 | } | |
2451 | if (itx->itx_sync) { | |
2452 | list_insert_tail(&itxs->i_sync_list, itx); | |
572e2857 BB |
2453 | } else { |
2454 | avl_tree_t *t = &itxs->i_async_tree; | |
50c957f7 NB |
2455 | uint64_t foid = |
2456 | LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid); | |
572e2857 BB |
2457 | itx_async_node_t *ian; |
2458 | avl_index_t where; | |
2459 | ||
2460 | ian = avl_find(t, &foid, &where); | |
2461 | if (ian == NULL) { | |
d1d7e268 | 2462 | ian = kmem_alloc(sizeof (itx_async_node_t), |
79c76d5b | 2463 | KM_SLEEP); |
572e2857 BB |
2464 | list_create(&ian->ia_list, sizeof (itx_t), |
2465 | offsetof(itx_t, itx_node)); | |
2466 | ian->ia_foid = foid; | |
2467 | avl_insert(t, ian, where); | |
2468 | } | |
2469 | list_insert_tail(&ian->ia_list, itx); | |
2470 | } | |
2471 | ||
2472 | itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx); | |
1ce23dca PS |
2473 | |
2474 | /* | |
2475 | * We don't want to dirty the ZIL using ZILTEST_TXG, because | |
2476 | * zil_clean() will never be called using ZILTEST_TXG. Thus, we | |
2477 | * need to be careful to always dirty the ZIL using the "real" | |
2478 | * TXG (not itxg_txg) even when the SPA is frozen. | |
2479 | */ | |
2480 | zilog_dirty(zilog, dmu_tx_get_txg(tx)); | |
572e2857 BB |
2481 | mutex_exit(&itxg->itxg_lock); |
2482 | ||
2483 | /* Release the old itxs now we've dropped the lock */ | |
2484 | if (clean != NULL) | |
2485 | zil_itxg_clean(clean); | |
2486 | } | |
2487 | ||
34dc7c2f BB |
2488 | /* |
2489 | * If there are any in-memory intent log transactions which have now been | |
29809a6c | 2490 | * synced then start up a taskq to free them. We should only do this after we |
e1cfd73f | 2491 | * have written out the uberblocks (i.e. txg has been committed) so that we |
29809a6c MA |
2492 | * don't inadvertently clean out in-memory log records that would be required |
2493 | * by zil_commit(). | |
34dc7c2f BB |
2494 | */ |
2495 | void | |
572e2857 | 2496 | zil_clean(zilog_t *zilog, uint64_t synced_txg) |
34dc7c2f | 2497 | { |
572e2857 BB |
2498 | itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK]; |
2499 | itxs_t *clean_me; | |
34dc7c2f | 2500 | |
1ce23dca PS |
2501 | ASSERT3U(synced_txg, <, ZILTEST_TXG); |
2502 | ||
572e2857 BB |
2503 | mutex_enter(&itxg->itxg_lock); |
2504 | if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) { | |
2505 | mutex_exit(&itxg->itxg_lock); | |
2506 | return; | |
2507 | } | |
2508 | ASSERT3U(itxg->itxg_txg, <=, synced_txg); | |
a032ac4b | 2509 | ASSERT3U(itxg->itxg_txg, !=, 0); |
572e2857 BB |
2510 | clean_me = itxg->itxg_itxs; |
2511 | itxg->itxg_itxs = NULL; | |
2512 | itxg->itxg_txg = 0; | |
2513 | mutex_exit(&itxg->itxg_lock); | |
2514 | /* | |
2515 | * Preferably start a task queue to free up the old itxs but | |
2516 | * if taskq_dispatch can't allocate resources to do that then | |
2517 | * free it in-line. This should be rare. Note that using TQ_SLEEP |
2518 | * created a bad performance problem. | |
2519 | */ | |
a032ac4b BB |
2520 | ASSERT3P(zilog->zl_dmu_pool, !=, NULL); |
2521 | ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL); | |
2522 | taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq, | |
23c13c7e | 2523 | zil_itxg_clean, clean_me, TQ_NOSLEEP); |
a032ac4b | 2524 | if (id == TASKQID_INVALID) |
572e2857 BB |
2525 | zil_itxg_clean(clean_me); |
2526 | } | |
2527 | ||
2528 | /* | |
1ce23dca PS |
2529 | * This function will traverse the queue of itxs that need to be |
2530 | * committed, and move them onto the ZIL's zl_itx_commit_list. | |
572e2857 | 2531 | */ |
233425a1 | 2532 | static uint64_t |
572e2857 BB |
2533 | zil_get_commit_list(zilog_t *zilog) |
2534 | { | |
233425a1 | 2535 | uint64_t otxg, txg, wtxg = 0; |
572e2857 | 2536 | list_t *commit_list = &zilog->zl_itx_commit_list; |
572e2857 | 2537 | |
1b2b0aca | 2538 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca | 2539 | |
572e2857 BB |
2540 | if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ |
2541 | otxg = ZILTEST_TXG; | |
2542 | else | |
2543 | otxg = spa_last_synced_txg(zilog->zl_spa) + 1; | |
2544 | ||
55922e73 GW |
2545 | /* |
2546 | * This is inherently racy, since there is nothing to prevent | |
2547 | * the last synced txg from changing. That's okay since we'll | |
2548 | * only commit things in the future. | |
2549 | */ | |
572e2857 BB |
2550 | for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { |
2551 | itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; | |
2552 | ||
2553 | mutex_enter(&itxg->itxg_lock); | |
2554 | if (itxg->itxg_txg != txg) { | |
2555 | mutex_exit(&itxg->itxg_lock); | |
2556 | continue; | |
2557 | } | |
2558 | ||
55922e73 GW |
2559 | /* |
2560 | * If we're adding itx records to the zl_itx_commit_list, | |
2561 | * then the zil better be dirty in this "txg". We can assert | |
2562 | * that here since we're holding the itxg_lock which will | |
2563 | * prevent spa_sync from cleaning it. Once we add the itxs | |
2564 | * to the zl_itx_commit_list we must commit it to disk even | |
2565 | * if it's unnecessary (i.e. the txg was synced). | |
2566 | */ | |
2567 | ASSERT(zilog_is_dirty_in_txg(zilog, txg) || | |
2568 | spa_freeze_txg(zilog->zl_spa) != UINT64_MAX); | |
233425a1 AM |
2569 | list_t *sync_list = &itxg->itxg_itxs->i_sync_list; |
2570 | if (unlikely(zilog->zl_suspend > 0)) { | |
2571 | /* | |
2572 | * ZIL was just suspended, but we lost the race. | |
2573 | * Allow all earlier itxs to be committed, but ask | |
2574 | * caller to do txg_wait_synced(txg) for any new. | |
2575 | */ | |
2576 | if (!list_is_empty(sync_list)) | |
2577 | wtxg = MAX(wtxg, txg); | |
2578 | } else { | |
2579 | list_move_tail(commit_list, sync_list); | |
2580 | } | |
572e2857 BB |
2581 | |
2582 | mutex_exit(&itxg->itxg_lock); | |
2583 | } | |
233425a1 | 2584 | return (wtxg); |
572e2857 BB |
2585 | } |
2586 | ||
2587 | /* | |
2588 | * Move the async itxs for a specified object to commit into sync lists. | |
2589 | */ | |
eedb3a62 | 2590 | void |
572e2857 BB |
2591 | zil_async_to_sync(zilog_t *zilog, uint64_t foid) |
2592 | { | |
2593 | uint64_t otxg, txg; | |
2594 | itx_async_node_t *ian; | |
2595 | avl_tree_t *t; | |
2596 | avl_index_t where; | |
2597 | ||
2598 | if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */ | |
2599 | otxg = ZILTEST_TXG; | |
2600 | else | |
2601 | otxg = spa_last_synced_txg(zilog->zl_spa) + 1; | |
2602 | ||
55922e73 GW |
2603 | /* |
2604 | * This is inherently racy, since there is nothing to prevent | |
2605 | * the last synced txg from changing. | |
2606 | */ | |
572e2857 BB |
2607 | for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) { |
2608 | itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK]; | |
2609 | ||
2610 | mutex_enter(&itxg->itxg_lock); | |
2611 | if (itxg->itxg_txg != txg) { | |
2612 | mutex_exit(&itxg->itxg_lock); | |
2613 | continue; | |
2614 | } | |
2615 | ||
2616 | /* | |
2617 | * If a foid is specified then find that node and append its | |
2618 | * list. Otherwise walk the tree appending all the lists | |
2619 | * to the sync list. We add to the end rather than the | |
2620 | * beginning to ensure the create has happened. | |
2621 | */ | |
2622 | t = &itxg->itxg_itxs->i_async_tree; | |
2623 | if (foid != 0) { | |
2624 | ian = avl_find(t, &foid, &where); | |
2625 | if (ian != NULL) { | |
2626 | list_move_tail(&itxg->itxg_itxs->i_sync_list, | |
2627 | &ian->ia_list); | |
2628 | } | |
2629 | } else { | |
2630 | void *cookie = NULL; | |
2631 | ||
2632 | while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) { | |
2633 | list_move_tail(&itxg->itxg_itxs->i_sync_list, | |
2634 | &ian->ia_list); | |
2635 | list_destroy(&ian->ia_list); | |
2636 | kmem_free(ian, sizeof (itx_async_node_t)); | |
2637 | } | |
2638 | } | |
2639 | mutex_exit(&itxg->itxg_lock); | |
34dc7c2f | 2640 | } |
34dc7c2f BB |
2641 | } |
2642 | ||
1ce23dca PS |
2643 | /* |
2644 | * This function will prune commit itxs that are at the head of the | |
2645 | * commit list (it won't prune past the first non-commit itx), and | |
2646 | * either: a) attach them to the last lwb that's still pending | |
2647 | * completion, or b) skip them altogether. | |
2648 | * | |
2649 | * This is used as a performance optimization to prevent commit itxs | |
2650 | * from generating new lwbs when it's unnecessary to do so. | |
2651 | */ | |
b128c09f | 2652 | static void |
1ce23dca | 2653 | zil_prune_commit_list(zilog_t *zilog) |
34dc7c2f | 2654 | { |
572e2857 | 2655 | itx_t *itx; |
34dc7c2f | 2656 | |
1b2b0aca | 2657 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
572e2857 | 2658 | |
1ce23dca PS |
2659 | while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) { |
2660 | lr_t *lrc = &itx->itx_lr; | |
2661 | if (lrc->lrc_txtype != TX_COMMIT) | |
2662 | break; | |
572e2857 | 2663 | |
1ce23dca PS |
2664 | mutex_enter(&zilog->zl_lock); |
2665 | ||
2666 | lwb_t *last_lwb = zilog->zl_last_lwb_opened; | |
900d09b2 PS |
2667 | if (last_lwb == NULL || |
2668 | last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) { | |
1ce23dca PS |
2669 | /* |
2670 | * All of the itxs this waiter was waiting on | |
2671 | * must have already completed (or there were | |
2672 | * never any itx's for it to wait on), so it's | |
2673 | * safe to skip this waiter and mark it done. | |
2674 | */ | |
2675 | zil_commit_waiter_skip(itx->itx_private); | |
2676 | } else { | |
2677 | zil_commit_waiter_link_lwb(itx->itx_private, last_lwb); | |
2678 | itx->itx_private = NULL; | |
2679 | } | |
2680 | ||
2681 | mutex_exit(&zilog->zl_lock); | |
2682 | ||
2683 | list_remove(&zilog->zl_itx_commit_list, itx); | |
2684 | zil_itx_destroy(itx); | |
2685 | } | |
2686 | ||
2687 | IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT); | |
2688 | } | |
2689 | ||
2690 | static void | |
2691 | zil_commit_writer_stall(zilog_t *zilog) | |
2692 | { | |
2693 | /* | |
2694 | * When zio_alloc_zil() fails to allocate the next lwb block on | |
2695 | * disk, we must call txg_wait_synced() to ensure all of the | |
2696 | * lwbs in the zilog's zl_lwb_list are synced and then freed (in | |
2697 | * zil_sync()), such that any subsequent ZIL writer (i.e. a call | |
2698 | * to zil_process_commit_list()) will have to call zil_create(), | |
2699 | * and start a new ZIL chain. | |
2700 | * | |
2701 | * Since zio_alloc_zil() failed, the lwb that was previously | |
2702 | * issued does not have a pointer to the "next" lwb on disk. | |
2703 | * Thus, if another ZIL writer thread was to allocate the "next" | |
2704 | * on-disk lwb, that block could be leaked in the event of a | |
2705 | * crash (because the previous lwb on-disk would not point to | |
2706 | * it). | |
2707 | * | |
1b2b0aca | 2708 | * We must hold the zilog's zl_issuer_lock while we do this, to |
1ce23dca PS |
2709 | * ensure no new threads enter zil_process_commit_list() until |
2710 | * all lwb's in the zl_lwb_list have been synced and freed | |
2711 | * (which is achieved via the txg_wait_synced() call). | |
2712 | */ | |
1b2b0aca | 2713 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca | 2714 | txg_wait_synced(zilog->zl_dmu_pool, 0); |
895e0313 | 2715 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); |
1ce23dca PS |
2716 | } |
2717 | ||
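/*
 * Illustrative diagram (an editorial sketch of the scenario described
 * in the comment above, not from the original source). Suppose the
 * on-disk chain looks like:
 *
 *	lwb A ---> lwb B ---> (zio_alloc_zil() failed; no next pointer)
 *
 * If another writer allocated a block C now, nothing on disk would
 * reference it, so after a crash the claim code walking A -> B could
 * never find C, and C would be leaked. Stalling until the txg syncs
 * (and the chain restarts via zil_create()) avoids that window.
 */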
2718 | /* | |
2719 | * This function will traverse the commit list, creating new lwbs as | |
2720 | * needed, and committing the itxs from the commit list to these newly | |
2721 | * created lwbs. Additionally, as a new lwb is created, the previous | |
2722 | * lwb will be issued to the zio layer to be written to disk. | |
2723 | */ | |
2724 | static void | |
f63811f0 | 2725 | zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs) |
1ce23dca PS |
2726 | { |
2727 | spa_t *spa = zilog->zl_spa; | |
2728 | list_t nolwb_itxs; | |
2729 | list_t nolwb_waiters; | |
0f740a4f | 2730 | lwb_t *lwb, *plwb; |
1ce23dca | 2731 | itx_t *itx; |
0f740a4f | 2732 | boolean_t first = B_TRUE; |
1ce23dca | 2733 | |
1b2b0aca | 2734 | ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock)); |
572e2857 BB |
2735 | |
2736 | /* | |
2737 | * Return if there's nothing to commit before we dirty the fs by | |
2738 | * calling zil_create(). | |
2739 | */ | |
895e0313 | 2740 | if (list_is_empty(&zilog->zl_itx_commit_list)) |
572e2857 | 2741 | return; |
34dc7c2f | 2742 | |
1ce23dca PS |
2743 | list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); |
2744 | list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t), | |
2745 | offsetof(zil_commit_waiter_t, zcw_node)); | |
2746 | ||
2747 | lwb = list_tail(&zilog->zl_lwb_list); | |
2748 | if (lwb == NULL) { | |
2749 | lwb = zil_create(zilog); | |
34dc7c2f | 2750 | } else { |
361a7e82 JP |
2751 | /* |
2752 | * Activate SPA_FEATURE_ZILSAXATTR for the cases where ZIL will | |
2753 | * have already been created (zl_lwb_list not empty). | |
2754 | */ | |
2755 | zil_commit_activate_saxattr_feature(zilog); | |
1ce23dca | 2756 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); |
900d09b2 PS |
2757 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE); |
2758 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); | |
0f740a4f AM |
2759 | first = (lwb->lwb_state != LWB_STATE_OPENED) && |
2760 | ((plwb = list_prev(&zilog->zl_lwb_list, lwb)) == NULL || | |
2761 | plwb->lwb_state == LWB_STATE_FLUSH_DONE); | |
34dc7c2f BB |
2762 | } |
2763 | ||
895e0313 | 2764 | while ((itx = list_remove_head(&zilog->zl_itx_commit_list)) != NULL) { |
1ce23dca PS |
2765 | lr_t *lrc = &itx->itx_lr; |
2766 | uint64_t txg = lrc->lrc_txg; | |
2767 | ||
55922e73 | 2768 | ASSERT3U(txg, !=, 0); |
34dc7c2f | 2769 | |
1ce23dca PS |
2770 | if (lrc->lrc_txtype == TX_COMMIT) { |
2771 | DTRACE_PROBE2(zil__process__commit__itx, | |
2772 | zilog_t *, zilog, itx_t *, itx); | |
2773 | } else { | |
2774 | DTRACE_PROBE2(zil__process__normal__itx, | |
2775 | zilog_t *, zilog, itx_t *, itx); | |
2776 | } | |
2777 | ||
1ce23dca PS |
2778 | boolean_t synced = txg <= spa_last_synced_txg(spa); |
2779 | boolean_t frozen = txg > spa_freeze_txg(spa); | |
2780 | ||
2fe61a7e PS |
2781 | /* |
2782 | * If the txg of this itx has already been synced out, then | |
2783 | * we don't need to commit this itx to an lwb. This is | |
2784 | * because the data of this itx will have already been | |
2785 | * written to the main pool. This is inherently racy, and | |
2786 | * it's still ok to commit an itx whose txg has already | |
2787 | * been synced; this will result in a write that's | |
2788 | * unnecessary, but will do no harm. | |
2789 | * | |
2790 | * With that said, we always want to commit TX_COMMIT itxs | |
2791 | * to an lwb, regardless of whether or not that itx's txg | |
2792 | * has been synced out. We do this to ensure any OPENED lwb | |
2793 | * will always have at least one zil_commit_waiter_t linked | |
2794 | * to the lwb. | |
2795 | * | |
2796 | * As a counter-example, if we skipped TX_COMMIT itx's | |
2797 | * whose txg had already been synced, the following | |
2798 | * situation could occur if we happened to be racing with | |
2799 | * spa_sync: | |
2800 | * | |
2801 | * 1. We commit a non-TX_COMMIT itx to an lwb, where the | |
2802 | * itx's txg is 10 and the last synced txg is 9. | |
2803 | * 2. spa_sync finishes syncing out txg 10. | |
2804 | * 3. We move to the next itx in the list, it's a TX_COMMIT | |
2805 | * whose txg is 10, so we skip it rather than committing | |
2806 | * it to the lwb used in (1). | |
2807 | * | |
2808 | * If the itx that is skipped in (3) is the last TX_COMMIT | |
2809 | * itx in the commit list, then it's possible for the lwb | |
2810 | * used in (1) to remain in the OPENED state indefinitely. | |
2811 | * | |
2812 | * To prevent the above scenario from occurring, ensuring | |
2813 | * that once an lwb is OPENED it will transition to ISSUED | |
2814 | * and eventually DONE, we always commit TX_COMMIT itx's to | |
2815 | * an lwb here, even if that itx's txg has already been | |
2816 | * synced. | |
2817 | * | |
2818 | * Finally, if the pool is frozen, we _always_ commit the | |
2819 | * itx. The point of freezing the pool is to prevent data | |
2820 | * from being written to the main pool via spa_sync, and | |
2821 | * instead rely solely on the ZIL to persistently store the | |
2822 | * data; i.e. when the pool is frozen, the last synced txg | |
2823 | * value can't be trusted. | |
2824 | */ | |
2825 | if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) { | |
1ce23dca | 2826 | if (lwb != NULL) { |
f63811f0 AM |
2827 | lwb = zil_lwb_assign(zilog, lwb, itx, ilwbs); |
2828 | if (lwb == NULL) { | |
1ce23dca | 2829 | list_insert_tail(&nolwb_itxs, itx); |
f63811f0 AM |
2830 | } else if ((zcw->zcw_lwb != NULL && |
2831 | zcw->zcw_lwb != lwb) || zcw->zcw_done) { | |
2832 | /* | |
2833 | * Our lwb is done; leave the rest of the | |
2834 | * itx list to somebody else who cares. | |
2835 | */ | |
2836 | first = B_FALSE; | |
2837 | break; | |
2838 | } | |
1ce23dca PS |
2839 | } else { |
2840 | if (lrc->lrc_txtype == TX_COMMIT) { | |
2841 | zil_commit_waiter_link_nolwb( | |
2842 | itx->itx_private, &nolwb_waiters); | |
2843 | } | |
1ce23dca PS |
2844 | list_insert_tail(&nolwb_itxs, itx); |
2845 | } | |
2846 | } else { | |
2fe61a7e | 2847 | ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT); |
1ce23dca PS |
2848 | zil_itx_destroy(itx); |
2849 | } | |
34dc7c2f | 2850 | } |
34dc7c2f | 2851 | |
1ce23dca PS |
2852 | if (lwb == NULL) { |
2853 | /* | |
2854 | * This indicates zio_alloc_zil() failed to allocate the | |
2855 | * "next" lwb on-disk. When this happens, we must stall | |
2856 | * the ZIL write pipeline; see the comment within | |
2857 | * zil_commit_writer_stall() for more details. | |
2858 | */ | |
f63811f0 AM |
2859 | while ((lwb = list_remove_head(ilwbs)) != NULL) |
2860 | zil_lwb_write_issue(zilog, lwb); | |
1ce23dca | 2861 | zil_commit_writer_stall(zilog); |
34dc7c2f | 2862 | |
1ce23dca PS |
2863 | /* |
2864 | * Additionally, we have to signal and mark the "nolwb" | |
2865 | * waiters as "done" here, since without an lwb, we | |
2866 | * can't do this via zil_lwb_flush_vdevs_done() like | |
2867 | * normal. | |
2868 | */ | |
2869 | zil_commit_waiter_t *zcw; | |
895e0313 | 2870 | while ((zcw = list_remove_head(&nolwb_waiters)) != NULL) |
1ce23dca | 2871 | zil_commit_waiter_skip(zcw); |
1ce23dca PS |
2872 | |
2873 | /* | |
2874 | * And finally, we have to destroy the itx's that | |
2875 | * couldn't be committed to an lwb; this will also call | |
2876 | * the itx's callback if one exists for the itx. | |
2877 | */ | |
895e0313 | 2878 | while ((itx = list_remove_head(&nolwb_itxs)) != NULL) |
1ce23dca | 2879 | zil_itx_destroy(itx); |
1ce23dca PS |
2880 | } else { |
2881 | ASSERT(list_is_empty(&nolwb_waiters)); | |
2882 | ASSERT3P(lwb, !=, NULL); | |
2883 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); | |
900d09b2 PS |
2884 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE); |
2885 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE); | |
1ce23dca PS |
2886 | |
2887 | /* | |
2888 | * At this point, the ZIL block pointed at by the "lwb" | |
2889 | * variable is in one of the following states: "closed" | |
2890 | * or "open". | |
2891 | * | |
2fe61a7e PS |
2892 | * If it's "closed", then no itxs have been committed to |
2893 | * it, so there's no point in issuing its zio (i.e. it's | |
2894 | * "empty"). | |
1ce23dca | 2895 | * |
2fe61a7e PS |
2896 | * If it's "open", then it contains one or more itxs that |
2897 | * eventually need to be committed to stable storage. In | |
2898 | * this case we intentionally do not issue the lwb's zio | |
2899 | * to disk yet, and instead rely on one of the following | |
2900 | * two mechanisms for issuing the zio: | |
1ce23dca | 2901 | * |
2fe61a7e | 2902 | * 1. Ideally, there will be more ZIL activity occurring |
1ce23dca | 2903 | * on the system, such that this function will be |
2fe61a7e | 2904 | * immediately called again (not necessarily by the same |
1ce23dca | 2905 | * thread) and this lwb's zio will be issued via |
f63811f0 | 2906 | * zil_lwb_assign(). This way, the lwb is guaranteed to |
1ce23dca PS |
2907 | * be "full" when it is issued to disk, and we'll make |
2908 | * use of the lwb's size the best we can. | |
2909 | * | |
2fe61a7e | 2910 | * 2. If there isn't sufficient ZIL activity occurring on |
1ce23dca | 2911 | * the system, such that this lwb's zio isn't issued via |
f63811f0 | 2912 | * zil_lwb_assign(), zil_commit_waiter() will issue the |
1ce23dca PS |
2913 | * lwb's zio. If this occurs, the lwb is not guaranteed |
2914 | * to be "full" by the time its zio is issued, and means | |
2915 | * the size of the lwb was "too large" given the amount | |
2fe61a7e | 2916 | * of ZIL activity occurring on the system at that time. |
1ce23dca PS |
2917 | * |
2918 | * We do this for a couple of reasons: | |
2919 | * | |
2920 | * 1. To try and reduce the number of IOPs needed to | |
2921 | * write the same number of itxs. If an lwb has space | |
2fe61a7e | 2922 | * available in its buffer for more itxs, and more itxs |
1ce23dca PS |
2923 | * will be committed relatively soon (relative to the |
2924 | * latency of performing a write), then it's beneficial | |
2925 | * to wait for these "next" itxs. This way, more itxs | |
2926 | * can be committed to stable storage with fewer writes. | |
2927 | * | |
2928 | * 2. To try and use the largest lwb block size that the | |
2929 | * incoming rate of itxs can support. Again, this is to | |
2930 | * try and pack as many itxs into as few lwbs as | |
2931 | * possible, without significantly impacting the latency | |
2932 | * of each individual itx. | |
0f740a4f AM |
2933 | * |
2934 | * If there were no already running or open LWBs, the | |
2935 | * workload may be single-threaded. And if the ZIL write | |
2936 | * latency is very small, or if the LWB is almost full, it may | |
2937 | * be cheaper to bypass the delay (worked example below). | |
1ce23dca | 2938 | */ |
0f740a4f AM |
2939 | if (lwb->lwb_state == LWB_STATE_OPENED && first) { |
2940 | hrtime_t sleep = zilog->zl_last_lwb_latency * | |
2941 | zfs_commit_timeout_pct / 100; | |
2942 | if (sleep < zil_min_commit_timeout || | |
2943 | lwb->lwb_sz - lwb->lwb_nused < lwb->lwb_sz / 8) { | |
2cb992a9 | 2944 | lwb = zil_lwb_write_close(zilog, lwb, ilwbs); |
0f740a4f | 2945 | zilog->zl_cur_used = 0; |
f63811f0 AM |
2946 | if (lwb == NULL) { |
2947 | while ((lwb = list_remove_head(ilwbs)) | |
2948 | != NULL) | |
2949 | zil_lwb_write_issue(zilog, lwb); | |
0f740a4f | 2950 | zil_commit_writer_stall(zilog); |
f63811f0 | 2951 | } |
0f740a4f AM |
2952 | } |
2953 | } | |
1ce23dca PS |
2954 | } |
2955 | } | |
2956 | ||
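/*
 * A worked example of the bypass heuristic at the end of
 * zil_process_commit_list() (the numbers are assumptions chosen for
 * illustration, not defaults): if the previous lwb took 200us to
 * complete and zfs_commit_timeout_pct is 5, the expected wait is
 * 200us * 5 / 100 = 10us. If that is below zil_min_commit_timeout,
 * or less than 1/8 of the lwb buffer remains free, the lwb is closed
 * and issued immediately instead of waiting for more itxs.
 */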
2957 | /* | |
2958 | * This function is responsible for ensuring the passed in commit waiter | |
2959 | * (and associated commit itx) is committed to an lwb. If the waiter is | |
2960 | * not already committed to an lwb, all itxs in the zilog's queue of | |
2961 | * itxs will be processed. The assumption is the passed in waiter's | |
2962 | * commit itx will be found in the queue just like the other non-commit | |
2963 | * itxs, such that when the entire queue is processed, the waiter will | |
2fe61a7e | 2964 | * have been committed to an lwb. |
1ce23dca PS |
2965 | * |
2966 | * The lwb associated with the passed in waiter is not guaranteed to | |
2967 | * have been issued by the time this function completes. If the lwb is | |
2968 | * not issued, we rely on future calls to zil_commit_writer() to issue | |
2969 | * the lwb, or the timeout mechanism found in zil_commit_waiter(). | |
2970 | */ | |
233425a1 | 2971 | static uint64_t |
1ce23dca PS |
2972 | zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw) |
2973 | { | |
f63811f0 AM |
2974 | list_t ilwbs; |
2975 | lwb_t *lwb; | |
233425a1 | 2976 | uint64_t wtxg = 0; |
f63811f0 | 2977 | |
1ce23dca PS |
2978 | ASSERT(!MUTEX_HELD(&zilog->zl_lock)); |
2979 | ASSERT(spa_writeable(zilog->zl_spa)); | |
1ce23dca | 2980 | |
f63811f0 | 2981 | list_create(&ilwbs, sizeof (lwb_t), offsetof(lwb_t, lwb_issue_node)); |
1b2b0aca | 2982 | mutex_enter(&zilog->zl_issuer_lock); |
1ce23dca PS |
2983 | |
2984 | if (zcw->zcw_lwb != NULL || zcw->zcw_done) { | |
2985 | /* | |
2986 | * It's possible that, while we were waiting to acquire | |
1b2b0aca | 2987 | * the "zl_issuer_lock", another thread committed this |
1ce23dca PS |
2988 | * waiter to an lwb. If that occurs, we bail out early, |
2989 | * without processing any of the zilog's queue of itxs. | |
2990 | * | |
2991 | * On certain workloads and system configurations, the | |
1b2b0aca | 2992 | * "zl_issuer_lock" can become highly contended. In an |
1ce23dca PS |
2993 | * attempt to reduce this contention, we immediately drop |
2994 | * the lock if the waiter has already been processed. | |
2995 | * | |
2996 | * We've measured this optimization to reduce CPU spent | |
2997 | * contending on this lock by up to 5%, using a system | |
2998 | * with 32 CPUs, low latency storage (~50 usec writes), | |
2999 | * and 1024 threads performing sync writes. | |
3000 | */ | |
3001 | goto out; | |
3002 | } | |
3003 | ||
fb087146 | 3004 | ZIL_STAT_BUMP(zilog, zil_commit_writer_count); |
1ce23dca | 3005 | |
233425a1 | 3006 | wtxg = zil_get_commit_list(zilog); |
1ce23dca | 3007 | zil_prune_commit_list(zilog); |
f63811f0 | 3008 | zil_process_commit_list(zilog, zcw, &ilwbs); |
1ce23dca PS |
3009 | |
3010 | out: | |
1b2b0aca | 3011 | mutex_exit(&zilog->zl_issuer_lock); |
f63811f0 AM |
3012 | while ((lwb = list_remove_head(&ilwbs)) != NULL) |
3013 | zil_lwb_write_issue(zilog, lwb); | |
3014 | list_destroy(&ilwbs); | |
233425a1 | 3015 | return (wtxg); |
1ce23dca PS |
3016 | } |
3017 | ||
3018 | static void | |
3019 | zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw) | |
3020 | { | |
1b2b0aca | 3021 | ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca PS |
3022 | ASSERT(MUTEX_HELD(&zcw->zcw_lock)); |
3023 | ASSERT3B(zcw->zcw_done, ==, B_FALSE); | |
3024 | ||
3025 | lwb_t *lwb = zcw->zcw_lwb; | |
3026 | ASSERT3P(lwb, !=, NULL); | |
3027 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED); | |
34dc7c2f BB |
3028 | |
3029 | /* | |
1ce23dca PS |
3030 | * If the lwb has already been issued by another thread, we can |
3031 | * immediately return since there's no work to be done (the | |
3032 | * point of this function is to issue the lwb). Additionally, we | |
1b2b0aca | 3033 | * do this prior to acquiring the zl_issuer_lock, to avoid |
1ce23dca | 3034 | * acquiring it when it's not necessary to do so. |
34dc7c2f | 3035 | */ |
1ce23dca | 3036 | if (lwb->lwb_state == LWB_STATE_ISSUED || |
900d09b2 PS |
3037 | lwb->lwb_state == LWB_STATE_WRITE_DONE || |
3038 | lwb->lwb_state == LWB_STATE_FLUSH_DONE) | |
1ce23dca | 3039 | return; |
34dc7c2f | 3040 | |
1ce23dca | 3041 | /* |
f63811f0 | 3042 | * In order to call zil_lwb_write_close() we must hold the |
1b2b0aca | 3043 | * zilog's "zl_issuer_lock". We can't simply acquire that lock, |
1ce23dca | 3044 | * since we're already holding the commit waiter's "zcw_lock", |
2fe61a7e | 3045 | * and those two locks are acquired in the opposite order |
1ce23dca PS |
3046 | * elsewhere. |
3047 | */ | |
3048 | mutex_exit(&zcw->zcw_lock); | |
1b2b0aca | 3049 | mutex_enter(&zilog->zl_issuer_lock); |
1ce23dca | 3050 | mutex_enter(&zcw->zcw_lock); |
34dc7c2f | 3051 | |
1ce23dca PS |
3052 | /* |
3053 | * Since we just dropped and re-acquired the commit waiter's | |
3054 | * lock, we have to re-check to see if the waiter was marked | |
3055 | * "done" during that process. If the waiter was marked "done", | |
3056 | * the "lwb" pointer is no longer valid (it can be free'd after | |
3057 | * the waiter is marked "done"), so without this check we could | |
3058 | * wind up with a use-after-free error below. | |
3059 | */ | |
f63811f0 AM |
3060 | if (zcw->zcw_done) { |
3061 | lwb = NULL; | |
1ce23dca | 3062 | goto out; |
f63811f0 | 3063 | } |
119a394a | 3064 | |
1ce23dca PS |
3065 | ASSERT3P(lwb, ==, zcw->zcw_lwb); |
3066 | ||
3067 | /* | |
2fe61a7e PS |
3068 | * We've already checked this above, but since we hadn't acquired |
3069 | * the zilog's zl_issuer_lock, we have to perform this check a | |
3070 | * second time while holding the lock. | |
3071 | * | |
3072 | * We don't need to hold the zl_lock since the lwb cannot transition | |
3073 | * from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb | |
3074 | * _can_ transition from ISSUED to DONE, but it's OK to race with | |
3075 | * that transition since we treat the lwb the same, whether it's in | |
3076 | * the ISSUED or DONE states. | |
3077 | * | |
3078 | * The important thing, is we treat the lwb differently depending on | |
3079 | * if it's ISSUED or OPENED, and block any other threads that might | |
3080 | * attempt to issue this lwb. For that reason we hold the | |
3081 | * zl_issuer_lock when checking the lwb_state; we must not call | |
f63811f0 | 3082 | * zil_lwb_write_close() if the lwb had already been issued. |
2fe61a7e PS |
3083 | * |
3084 | * See the comment above the lwb_state_t structure definition for | |
3085 | * more details on the lwb states, and locking requirements. | |
1ce23dca PS |
3086 | */ |
3087 | if (lwb->lwb_state == LWB_STATE_ISSUED || | |
900d09b2 | 3088 | lwb->lwb_state == LWB_STATE_WRITE_DONE || |
f63811f0 AM |
3089 | lwb->lwb_state == LWB_STATE_FLUSH_DONE) { |
3090 | lwb = NULL; | |
1ce23dca | 3091 | goto out; |
f63811f0 | 3092 | } |
1ce23dca PS |
3093 | |
3094 | ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED); | |
3095 | ||
3096 | /* | |
3097 | * As described in the comments above zil_commit_waiter() and | |
3098 | * zil_process_commit_list(), we need to issue this lwb's zio | |
3099 | * since we've reached the commit waiter's timeout and it still | |
3100 | * hasn't been issued. | |
3101 | */ | |
2cb992a9 | 3102 | lwb_t *nlwb = zil_lwb_write_close(zilog, lwb, NULL); |
1ce23dca | 3103 | |
895e0313 | 3104 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED); |
1ce23dca PS |
3105 | |
3106 | /* | |
3107 | * Since the lwb's zio hadn't been issued by the time this thread | |
3108 | * reached its timeout, we reset the zilog's "zl_cur_used" field | |
3109 | * to influence the zil block size selection algorithm. | |
3110 | * | |
3111 | * By having to issue the lwb's zio here, it means the size of the | |
3112 | * lwb was too large, given the incoming throughput of itxs. By | |
3113 | * setting "zl_cur_used" to zero, we communicate this fact to the | |
2fe61a7e | 3114 | * block size selection algorithm, so it can take this information |
1ce23dca PS |
3115 | * into account, and potentially select a smaller size for the |
3116 | * next lwb block that is allocated. | |
3117 | */ | |
3118 | zilog->zl_cur_used = 0; | |
3119 | ||
3120 | if (nlwb == NULL) { | |
3121 | /* | |
f63811f0 | 3122 | * When zil_lwb_write_close() returns NULL, this |
1ce23dca PS |
3123 | * indicates zio_alloc_zil() failed to allocate the |
3124 | * "next" lwb on-disk. When this occurs, the ZIL write | |
3125 | * pipeline must be stalled; see the comment within the | |
3126 | * zil_commit_writer_stall() function for more details. | |
3127 | * | |
3128 | * We must drop the commit waiter's lock prior to | |
3129 | * calling zil_commit_writer_stall() or else we can wind | |
3130 | * up with the following deadlock: | |
3131 | * | |
3132 | * - This thread is waiting for the txg to sync while | |
3133 | * holding the waiter's lock; txg_wait_synced() is | |
3134 | * used within zil_commit_writer_stall(). | |
3135 | * | |
3136 | * - The txg can't sync because it is waiting for this | |
3137 | * lwb's zio callback to call dmu_tx_commit(). | |
3138 | * | |
3139 | * - The lwb's zio callback can't call dmu_tx_commit() | |
3140 | * because it's blocked trying to acquire the waiter's | |
3141 | * lock, which occurs prior to calling dmu_tx_commit() | |
3142 | */ | |
3143 | mutex_exit(&zcw->zcw_lock); | |
f63811f0 AM |
3144 | zil_lwb_write_issue(zilog, lwb); |
3145 | lwb = NULL; | |
1ce23dca PS |
3146 | zil_commit_writer_stall(zilog); |
3147 | mutex_enter(&zcw->zcw_lock); | |
119a394a ED |
3148 | } |
3149 | ||
1ce23dca | 3150 | out: |
1b2b0aca | 3151 | mutex_exit(&zilog->zl_issuer_lock); |
f63811f0 AM |
3152 | if (lwb) |
3153 | zil_lwb_write_issue(zilog, lwb); | |
1ce23dca PS |
3154 | ASSERT(MUTEX_HELD(&zcw->zcw_lock)); |
3155 | } | |
3156 | ||
3157 | /* | |
3158 | * This function is responsible for performing the following two tasks: | |
3159 | * | |
3160 | * 1. its primary responsibility is to block until the given "commit | |
3161 | * waiter" is considered "done". | |
3162 | * | |
3163 | * 2. its secondary responsibility is to issue the zio for the lwb that | |
3164 | * the given "commit waiter" is waiting on, if this function has | |
3165 | * waited "long enough" and the lwb is still in the "open" state. | |
3166 | * | |
3167 | * Given a sufficient amount of itxs being generated and written using | |
f63811f0 | 3168 | * the ZIL, the lwb's zio will be issued via the zil_lwb_assign() |
1ce23dca PS |
3169 | * function. If this does not occur, this secondary responsibility will |
3170 | * ensure the lwb is issued even if there is no other synchronous | |
3171 | * activity on the system. | |
3172 | * | |
3173 | * For more details, see zil_process_commit_list(); more specifically, | |
3174 | * the comment at the bottom of that function. | |
3175 | */ | |
3176 | static void | |
3177 | zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw) | |
3178 | { | |
3179 | ASSERT(!MUTEX_HELD(&zilog->zl_lock)); | |
1b2b0aca | 3180 | ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock)); |
1ce23dca | 3181 | ASSERT(spa_writeable(zilog->zl_spa)); |
1ce23dca PS |
3182 | |
3183 | mutex_enter(&zcw->zcw_lock); | |
428870ff BB |
3184 | |
3185 | /* | |
1ce23dca PS |
3186 | * The timeout is scaled based on the lwb latency to avoid |
3187 | * significantly impacting the latency of each individual itx. | |
3188 | * For more details, see the comment at the bottom of the | |
3189 | * zil_process_commit_list() function. | |
428870ff | 3190 | */ |
1ce23dca PS |
3191 | int pct = MAX(zfs_commit_timeout_pct, 1); |
3192 | hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100; | |
3193 | hrtime_t wakeup = gethrtime() + sleep; | |
3194 | boolean_t timedout = B_FALSE; | |
3195 | ||
3196 | while (!zcw->zcw_done) { | |
3197 | ASSERT(MUTEX_HELD(&zcw->zcw_lock)); | |
3198 | ||
3199 | lwb_t *lwb = zcw->zcw_lwb; | |
3200 | ||
3201 | /* | |
3202 | * Usually, the waiter will have a non-NULL lwb field here, | |
3203 | * but it's possible for it to be NULL as a result of | |
3204 | * zil_commit() racing with spa_sync(). | |
3205 | * | |
3206 | * When zil_clean() is called, it's possible for the itxg | |
3207 | * list (which may be cleaned via a taskq) to contain | |
3208 | * commit itxs. When this occurs, the commit waiters linked | |
3209 | * off of these commit itxs will not be committed to an | |
3210 | * lwb. Additionally, these commit waiters will not be | |
3211 | * marked done until zil_commit_waiter_skip() is called via | |
3212 | * zil_itxg_clean(). | |
3213 | * | |
3214 | * Thus, it's possible for this commit waiter (i.e. the | |
3215 | * "zcw" variable) to be found in this "in between" state; | |
3216 | * where its "zcw_lwb" field is NULL, and it hasn't yet | |
3217 | * been skipped, so its "zcw_done" field is still B_FALSE. | |
3218 | */ | |
3219 | IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED); | |
3220 | ||
3221 | if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) { | |
3222 | ASSERT3B(timedout, ==, B_FALSE); | |
3223 | ||
3224 | /* | |
3225 | * If the lwb hasn't been issued yet, then we | |
3226 | * need to wait with a timeout, in case this | |
3227 | * function needs to issue the lwb after the | |
3228 | * timeout is reached; responsibility (2) from | |
3229 | * the comment above this function. | |
3230 | */ | |
8056a756 | 3231 | int rc = cv_timedwait_hires(&zcw->zcw_cv, |
1ce23dca PS |
3232 | &zcw->zcw_lock, wakeup, USEC2NSEC(1), |
3233 | CALLOUT_FLAG_ABSOLUTE); | |
3234 | ||
8056a756 | 3235 | if (rc != -1 || zcw->zcw_done) |
1ce23dca PS |
3236 | continue; |
3237 | ||
3238 | timedout = B_TRUE; | |
3239 | zil_commit_waiter_timeout(zilog, zcw); | |
3240 | ||
3241 | if (!zcw->zcw_done) { | |
3242 | /* | |
3243 | * If the commit waiter has already been | |
3244 | * marked "done", it's possible for the | |
3245 | * waiter's lwb structure to have already | |
3246 | * been freed. Thus, we can only reliably | |
3247 | * make these assertions if the waiter | |
3248 | * isn't done. | |
3249 | */ | |
3250 | ASSERT3P(lwb, ==, zcw->zcw_lwb); | |
3251 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED); | |
3252 | } | |
3253 | } else { | |
3254 | /* | |
3255 | * If the lwb isn't open, then it must have already | |
3256 | * been issued. In that case, there's no need to | |
3257 | * use a timeout when waiting for the lwb to | |
3258 | * complete. | |
3259 | * | |
3260 | * Additionally, if the lwb is NULL, the waiter | |
2fe61a7e | 3261 | * will soon be signaled and marked done via |
1ce23dca PS |
3262 | * zil_clean() and zil_itxg_clean(), so no timeout |
3263 | * is required. | |
3264 | */ | |
3265 | ||
3266 | IMPLY(lwb != NULL, | |
3267 | lwb->lwb_state == LWB_STATE_ISSUED || | |
900d09b2 PS |
3268 | lwb->lwb_state == LWB_STATE_WRITE_DONE || |
3269 | lwb->lwb_state == LWB_STATE_FLUSH_DONE); | |
1ce23dca PS |
3270 | cv_wait(&zcw->zcw_cv, &zcw->zcw_lock); |
3271 | } | |
3272 | } | |
3273 | ||
3274 | mutex_exit(&zcw->zcw_lock); | |
3275 | } | |
3276 | ||
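/*
 * A short worked example of the wakeup computation above (assumed
 * numbers, for illustration only): with zl_last_lwb_latency at 1ms
 * and zfs_commit_timeout_pct at 5, the waiter blocks in
 * cv_timedwait_hires() for at most 1ms * 5 / 100 = 50us before
 * falling into zil_commit_waiter_timeout() to issue the lwb itself.
 */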
3277 | static zil_commit_waiter_t * | |
3278 | zil_alloc_commit_waiter(void) | |
3279 | { | |
3280 | zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP); | |
3281 | ||
3282 | cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL); | |
3283 | mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL); | |
3284 | list_link_init(&zcw->zcw_node); | |
3285 | zcw->zcw_lwb = NULL; | |
3286 | zcw->zcw_done = B_FALSE; | |
3287 | zcw->zcw_zio_error = 0; | |
3288 | ||
3289 | return (zcw); | |
3290 | } | |
3291 | ||
3292 | static void | |
3293 | zil_free_commit_waiter(zil_commit_waiter_t *zcw) | |
3294 | { | |
3295 | ASSERT(!list_link_active(&zcw->zcw_node)); | |
3296 | ASSERT3P(zcw->zcw_lwb, ==, NULL); | |
3297 | ASSERT3B(zcw->zcw_done, ==, B_TRUE); | |
3298 | mutex_destroy(&zcw->zcw_lock); | |
3299 | cv_destroy(&zcw->zcw_cv); | |
3300 | kmem_cache_free(zil_zcw_cache, zcw); | |
34dc7c2f BB |
3301 | } |
3302 | ||
3303 | /* | |
1ce23dca PS |
3304 | * This function is used to create a TX_COMMIT itx and assign it. This |
3305 | * way, it will be linked into the ZIL's list of synchronous itxs, and | |
3306 | * then later committed to an lwb (or skipped) when | |
3307 | * zil_process_commit_list() is called. | |
3308 | */ | |
3309 | static void | |
3310 | zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw) | |
3311 | { | |
3312 | dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); | |
2fd1c304 AM |
3313 | |
3314 | /* | |
3315 | * Since we are not going to create any new dirty data, and we | |
3316 | * can even help with clearing the existing dirty data, we | |
3317 | * should not be subject to the dirty data based delays. We | |
3318 | * use TXG_NOTHROTTLE to bypass the delay mechanism. | |
3319 | */ | |
3320 | VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE)); | |
1ce23dca PS |
3321 | |
3322 | itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t)); | |
3323 | itx->itx_sync = B_TRUE; | |
3324 | itx->itx_private = zcw; | |
3325 | ||
3326 | zil_itx_assign(zilog, itx, tx); | |
3327 | ||
3328 | dmu_tx_commit(tx); | |
3329 | } | |
3330 | ||
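/*
 * Editorial note: a TX_COMMIT itx carries only the common lr_t header
 * (hence the sizeof (lr_t) above). It has no on-disk record body, and
 * its itx_private points at the commit waiter rather than at log data;
 * see the larger comment below for how the waiter is consumed.
 */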
3331 | /* | |
3332 | * Commit ZFS Intent Log transactions (itxs) to stable storage. | |
3333 | * | |
3334 | * When writing ZIL transactions to the on-disk representation of the | |
3335 | * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple | |
3336 | * itxs can be committed to a single lwb. Once a lwb is written and | |
3337 | * committed to stable storage (i.e. the lwb is written, and vdevs have | |
3338 | * been flushed), each itx that was committed to that lwb is also | |
3339 | * considered to be committed to stable storage. | |
3340 | * | |
3341 | * When an itx is committed to an lwb, the log record (lr_t) contained | |
3342 | * by the itx is copied into the lwb's zio buffer, and once this buffer | |
3343 | * is written to disk, it becomes an on-disk ZIL block. | |
3344 | * | |
3345 | * As itxs are generated, they're inserted into the ZIL's queue of | |
3346 | * uncommitted itxs. The semantics of zil_commit() are such that it will | |
3347 | * block until all itxs that were in the queue when it was called, are | |
3348 | * committed to stable storage. | |
3349 | * | |
3350 | * If "foid" is zero, this means all "synchronous" and "asynchronous" | |
3351 | * itxs, for all objects in the dataset, will be committed to stable | |
3352 | * storage prior to zil_commit() returning. If "foid" is non-zero, all | |
3353 | * "synchronous" itxs for all objects, but only "asynchronous" itxs | |
3354 | * that correspond to the foid passed in, will be committed to stable | |
3355 | * storage prior to zil_commit() returning. | |
3356 | * | |
3357 | * Generally speaking, when zil_commit() is called, the consumer doesn't | |
3358 | * actually care about _all_ of the uncommitted itxs. Instead, they're | |
3359 | * simply trying to wait for a specific itx to be committed to disk, | |
3360 | * but the interface(s) for interacting with the ZIL don't allow such | |
3361 | * fine-grained communication. A better interface would allow a consumer | |
3362 | * to create and assign an itx, and then pass a reference to this itx to | |
3363 | * zil_commit(); such that zil_commit() would return as soon as that | |
3364 | * specific itx was committed to disk (instead of waiting for _all_ | |
3365 | * itxs to be committed). | |
3366 | * | |
3367 | * When a thread calls zil_commit() a special "commit itx" will be | |
3368 | * generated, along with a corresponding "waiter" for this commit itx. | |
3369 | * zil_commit() will wait on this waiter's CV, such that when the waiter | |
2fe61a7e | 3370 | * is marked done, and signaled, zil_commit() will return. |
1ce23dca PS |
3371 | * |
3372 | * This commit itx is inserted into the queue of uncommitted itxs. This | |
3373 | * provides an easy mechanism for determining which itxs were in the | |
3374 | * queue prior to zil_commit() having been called, and which itxs were | |
3375 | * added after zil_commit() was called. | |
3376 | * | |
2310dba9 | 3377 | * The commit itx is special; it doesn't have any on-disk representation. |
1ce23dca PS |
3378 | * When a commit itx is "committed" to an lwb, the waiter associated |
3379 | * with it is linked onto the lwb's list of waiters. Then, when that lwb | |
2fe61a7e | 3380 | * completes, each waiter on the lwb's list is marked done and signaled |
1ce23dca PS |
3381 | * -- allowing the thread waiting on the waiter to return from zil_commit(). |
3382 | * | |
3383 | * It's important to point out a few critical factors that allow us | |
3384 | * to make use of the commit itxs, commit waiters, per-lwb lists of | |
3385 | * commit waiters, and zio completion callbacks like we're doing: | |
572e2857 | 3386 | * |
1ce23dca | 3387 | * 1. The list of waiters for each lwb is traversed, and each commit |
2fe61a7e | 3388 | * waiter is marked "done" and signaled, in the zio completion |
1ce23dca | 3389 | * callback of the lwb's zio[*]. |
572e2857 | 3390 | * |
2fe61a7e | 3391 | * * Actually, the waiters are signaled in the zio completion |
1ce23dca PS |
3392 | * callback of the root zio for the DKIOCFLUSHWRITECACHE commands |
3393 | * that are sent to the vdevs upon completion of the lwb zio. | |
572e2857 | 3394 | * |
1ce23dca PS |
3395 | * 2. When the itxs are inserted into the ZIL's queue of uncommitted |
3396 | * itxs, the order in which they are inserted is preserved[*]; as | |
3397 | * itxs are added to the queue, they are added to the tail of | |
3398 | * in-memory linked lists. | |
572e2857 | 3399 | * |
1ce23dca PS |
3400 | * When committing the itxs to lwbs (to be written to disk), they |
3401 | * are committed in the same order in which the itxs were added to | |
3402 | * the uncommitted queue's linked list(s); i.e. the linked list of | |
3403 | * itxs to commit is traversed from head to tail, and each itx is | |
3404 | * committed to an lwb in that order. | |
3405 | * | |
3406 | * * To clarify: | |
3407 | * | |
3408 | * - the order of "sync" itxs is preserved w.r.t. other | |
3409 | * "sync" itxs, regardless of the corresponding objects. | |
3410 | * - the order of "async" itxs is preserved w.r.t. other | |
3411 | * "async" itxs corresponding to the same object. | |
3412 | * - the order of "async" itxs is *not* preserved w.r.t. other | |
3413 | * "async" itxs corresponding to different objects. | |
3414 | * - the order of "sync" itxs w.r.t. "async" itxs (or vice | |
3415 | * versa) is *not* preserved, even for itxs that correspond | |
3416 | * to the same object. | |
3417 | * | |
3418 | * For more details, see: zil_itx_assign(), zil_async_to_sync(), | |
3419 | * zil_get_commit_list(), and zil_process_commit_list(). | |
3420 | * | |
3421 | * 3. The lwbs represent a linked list of blocks on disk. Thus, any | |
3422 | * lwb cannot be considered committed to stable storage, until its | |
3423 | * "previous" lwb is also committed to stable storage. This fact, | |
3424 | * coupled with the fact described above, means that itxs are | |
3425 | * committed in (roughly) the order in which they were generated. | |
3426 | * This is essential because itxs are dependent on prior itxs. | |
3427 | * Thus, we *must not* deem an itx as being committed to stable | |
3428 | * storage, until *all* prior itxs have also been committed to | |
3429 | * stable storage. | |
3430 | * | |
3431 | * To enforce this ordering of lwb zio's, while still leveraging as | |
3432 | * much of the underlying storage performance as possible, we rely | |
3433 | * on two fundamental concepts: | |
3434 | * | |
3435 | * 1. The creation and issuance of lwb zio's is protected by | |
1b2b0aca | 3436 | * the zilog's "zl_issuer_lock", which ensures only a single |
1ce23dca PS |
3437 | * thread is creating and/or issuing lwb's at a time |
3438 | * 2. The "previous" lwb is a child of the "current" lwb | |
2fe61a7e | 3439 | * (leveraging the zio parent-child dependency graph) |
1ce23dca PS |
3440 | * |
3441 | * By relying on this parent-child zio relationship, we can have | |
3442 | * many lwb zio's concurrently issued to the underlying storage, | |
3443 | * but the order in which they complete will be the same order in | |
3444 | * which they were created. | |
34dc7c2f BB |
3445 | */ |
3446 | void | |
572e2857 | 3447 | zil_commit(zilog_t *zilog, uint64_t foid) |
34dc7c2f | 3448 | { |
1ce23dca PS |
3449 | /* |
3450 | * We should never attempt to call zil_commit on a snapshot for | |
3451 | * a couple of reasons: | |
3452 | * | |
3453 | * 1. A snapshot may never be modified, thus it cannot have any | |
3454 | * in-flight itxs that would have modified the dataset. | |
3455 | * | |
3456 | * 2. By design, when zil_commit() is called, a commit itx will | |
3457 | * be assigned to this zilog; as a result, the zilog will be | |
3458 | * dirtied. We must not dirty the zilog of a snapshot; there's | |
3459 | * checks in the code that enforce this invariant, and will | |
3460 | * cause a panic if it's not upheld. | |
3461 | */ | |
3462 | ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE); | |
34dc7c2f | 3463 | |
572e2857 BB |
3464 | if (zilog->zl_sync == ZFS_SYNC_DISABLED) |
3465 | return; | |
34dc7c2f | 3466 | |
1ce23dca PS |
3467 | if (!spa_writeable(zilog->zl_spa)) { |
3468 | /* | |
3469 | * If the SPA is not writable, there should never be any | |
3470 | * pending itxs waiting to be committed to disk. If that | |
3471 | * weren't true, we'd skip writing those itxs out, and | |
2fe61a7e | 3472 | * would break the semantics of zil_commit(); thus, we're |
1ce23dca PS |
3473 | * verifying that truth before we return to the caller. |
3474 | */ | |
3475 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); | |
3476 | ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); | |
3477 | for (int i = 0; i < TXG_SIZE; i++) | |
3478 | ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL); | |
3479 | return; | |
3480 | } | |
3481 | ||
3482 | /* | |
3483 | * If the ZIL is suspended, we don't want to dirty it by calling | |
3484 | * zil_commit_itx_assign() below, nor can we write out | |
3485 | * lwbs like would be done in zil_commit_write(). Thus, we | |
3486 | * simply rely on txg_wait_synced() to maintain the necessary | |
3487 | * semantics, and avoid calling those functions altogether. | |
3488 | */ | |
3489 | if (zilog->zl_suspend > 0) { | |
3490 | txg_wait_synced(zilog->zl_dmu_pool, 0); | |
3491 | return; | |
3492 | } | |
3493 | ||
2fe61a7e PS |
3494 | zil_commit_impl(zilog, foid); |
3495 | } | |
3496 | ||
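/*
 * Hedged usage sketch (not from the original source): a typical
 * synchronous consumer is the ZPL's fsync path, which, roughly, does
 *
 *	zil_commit(zfsvfs->z_log, zp->z_id);
 *
 * (zfsvfs/zp naming as in the ZPL). When this returns, every itx that
 * was queued for that object at the time of the call is either on
 * stable ZIL storage or has been covered by txg_wait_synced().
 */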
3497 | void | |
3498 | zil_commit_impl(zilog_t *zilog, uint64_t foid) | |
3499 | { | |
fb087146 | 3500 | ZIL_STAT_BUMP(zilog, zil_commit_count); |
b6ad9671 | 3501 | |
1ce23dca PS |
3502 | /* |
3503 | * Move the "async" itxs for the specified foid to the "sync" | |
3504 | * queues, such that they will be later committed (or skipped) | |
3505 | * to an lwb when zil_process_commit_list() is called. | |
3506 | * | |
3507 | * Since these "async" itxs must be committed prior to this | |
3508 | * call to zil_commit returning, we must perform this operation | |
3509 | * before we call zil_commit_itx_assign(). | |
3510 | */ | |
572e2857 | 3511 | zil_async_to_sync(zilog, foid); |
34dc7c2f | 3512 | |
1ce23dca PS |
3513 | /* |
3514 | * We allocate a new "waiter" structure which will initially be | |
3515 | * linked to the commit itx using the itx's "itx_private" field. | |
3516 | * Since the commit itx doesn't represent any on-disk state, | |
3517 | * when it's committed to an lwb, rather than copying the its | |
3518 | * lr_t into the lwb's buffer, the commit itx's "waiter" will be | |
3519 | * added to the lwb's list of waiters. Then, when the lwb is | |
3520 | * committed to stable storage, each waiter in the lwb's list of | |
3521 | * waiters will be marked "done", and signaled. | |
3522 | * | |
3523 | * We must create the waiter and assign the commit itx prior to | |
3524 | * calling zil_commit_writer(), or else our specific commit itx | |
3525 | * is not guaranteed to be committed to an lwb prior to calling | |
3526 | * zil_commit_waiter(). | |
3527 | */ | |
3528 | zil_commit_waiter_t *zcw = zil_alloc_commit_waiter(); | |
3529 | zil_commit_itx_assign(zilog, zcw); | |
428870ff | 3530 | |
233425a1 | 3531 | uint64_t wtxg = zil_commit_writer(zilog, zcw); |
1ce23dca | 3532 | zil_commit_waiter(zilog, zcw); |
428870ff | 3533 | |
1ce23dca PS |
3534 | if (zcw->zcw_zio_error != 0) { |
3535 | /* | |
3536 | * If there was an error writing out the ZIL blocks that | |
3537 | * this thread is waiting on, then we fallback to | |
3538 | * relying on spa_sync() to write out the data this | |
3539 | * thread is waiting on. Obviously this has performance | |
3540 | * implications, but the expectation is for this to be | |
3541 | * an exceptional case, and shouldn't occur often. | |
3542 | */ | |
3543 | DTRACE_PROBE2(zil__commit__io__error, | |
3544 | zilog_t *, zilog, zil_commit_waiter_t *, zcw); | |
3545 | txg_wait_synced(zilog->zl_dmu_pool, 0); | |
233425a1 AM |
3546 | } else if (wtxg != 0) { |
3547 | txg_wait_synced(zilog->zl_dmu_pool, wtxg); | |
1ce23dca | 3548 | } |
8c0712fd | 3549 | |
1ce23dca | 3550 | zil_free_commit_waiter(zcw); |
428870ff BB |
3551 | } |
3552 | ||
34dc7c2f BB |
3553 | /* |
3554 | * Called in syncing context to free committed log blocks and update log header. | |
3555 | */ | |
3556 | void | |
3557 | zil_sync(zilog_t *zilog, dmu_tx_t *tx) | |
3558 | { | |
3559 | zil_header_t *zh = zil_header_in_syncing_context(zilog); | |
3560 | uint64_t txg = dmu_tx_get_txg(tx); | |
3561 | spa_t *spa = zilog->zl_spa; | |
428870ff | 3562 | uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK]; |
34dc7c2f BB |
3563 | lwb_t *lwb; |
3564 | ||
9babb374 BB |
3565 | /* |
3566 | * We don't zero out zl_destroy_txg, so make sure we don't try | |
3567 | * to destroy it twice. | |
3568 | */ | |
3569 | if (spa_sync_pass(spa) != 1) | |
3570 | return; | |
3571 | ||
152d6fda KJ |
3572 | zil_lwb_flush_wait_all(zilog, txg); |
3573 | ||
34dc7c2f BB |
3574 | mutex_enter(&zilog->zl_lock); |
3575 | ||
3576 | ASSERT(zilog->zl_stop_sync == 0); | |
3577 | ||
428870ff BB |
3578 | if (*replayed_seq != 0) { |
3579 | ASSERT(zh->zh_replay_seq < *replayed_seq); | |
3580 | zh->zh_replay_seq = *replayed_seq; | |
3581 | *replayed_seq = 0; | |
3582 | } | |
34dc7c2f BB |
3583 | |
3584 | if (zilog->zl_destroy_txg == txg) { | |
3585 | blkptr_t blk = zh->zh_log; | |
361a7e82 | 3586 | dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os); |
34dc7c2f | 3587 | |
895e0313 | 3588 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); |
34dc7c2f | 3589 | |
861166b0 AZ |
3590 | memset(zh, 0, sizeof (zil_header_t)); |
3591 | memset(zilog->zl_replayed_seq, 0, | |
3592 | sizeof (zilog->zl_replayed_seq)); | |
34dc7c2f BB |
3593 | |
3594 | if (zilog->zl_keep_first) { | |
3595 | /* | |
3596 | * If this block was part of log chain that couldn't | |
3597 | * be claimed because a device was missing during | |
3598 | * zil_claim(), but that device later returns, | |
3599 | * then this block could erroneously appear valid. | |
3600 | * To guard against this, assign a new GUID to the new | |
3601 | * log chain so it doesn't matter what blk points to. | |
3602 | */ | |
3603 | zil_init_log_chain(zilog, &blk); | |
3604 | zh->zh_log = blk; | |
361a7e82 JP |
3605 | } else { |
3606 | /* | |
3607 | * A destroyed ZIL chain can't contain any TX_SETSAXATTR | |
3608 | * records. So, deactivate the feature for this dataset. | |
3609 | * We activate it again when we start a new ZIL chain. | |
3610 | */ | |
3611 | if (dsl_dataset_feature_is_active(ds, | |
3612 | SPA_FEATURE_ZILSAXATTR)) | |
3613 | dsl_dataset_deactivate_feature(ds, | |
3614 | SPA_FEATURE_ZILSAXATTR, tx); | |
34dc7c2f BB |
3615 | } |
3616 | } | |
3617 | ||
9babb374 | 3618 | while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) { |
34dc7c2f | 3619 | zh->zh_log = lwb->lwb_blk; |
7381ddf1 AM |
3620 | if (lwb->lwb_state != LWB_STATE_FLUSH_DONE || |
3621 | lwb->lwb_max_txg > txg) | |
34dc7c2f BB |
3622 | break; |
3623 | list_remove(&zilog->zl_lwb_list, lwb); | |
1ce23dca PS |
3624 | zio_free(spa, txg, &lwb->lwb_blk); |
3625 | zil_free_lwb(zilog, lwb); | |
34dc7c2f BB |
3626 | |
3627 | /* | |
3628 | * If we don't have anything left in the lwb list then | |
3629 | * we've had an allocation failure and we need to zero | |
3630 | * out the zil_header blkptr so that we don't end | |
3631 | * up freeing the same block twice. | |
3632 | */ | |
895e0313 | 3633 | if (list_is_empty(&zilog->zl_lwb_list)) |
34dc7c2f BB |
3634 | BP_ZERO(&zh->zh_log); |
3635 | } | |
920dd524 | 3636 | |
34dc7c2f BB |
3637 | mutex_exit(&zilog->zl_lock); |
3638 | } | |
3639 | ||
1ce23dca PS |
3640 | static int |
3641 | zil_lwb_cons(void *vbuf, void *unused, int kmflag) | |
3642 | { | |
14e4e3cb | 3643 | (void) unused, (void) kmflag; |
1ce23dca PS |
3644 | lwb_t *lwb = vbuf; |
3645 | list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node)); | |
3646 | list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t), | |
3647 | offsetof(zil_commit_waiter_t, zcw_node)); | |
3648 | avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare, | |
3649 | sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node)); | |
3650 | mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL); | |
3651 | return (0); | |
3652 | } | |
3653 | ||
1ce23dca PS |
3654 | static void |
3655 | zil_lwb_dest(void *vbuf, void *unused) | |
3656 | { | |
14e4e3cb | 3657 | (void) unused; |
1ce23dca PS |
3658 | lwb_t *lwb = vbuf; |
3659 | mutex_destroy(&lwb->lwb_vdev_lock); | |
3660 | avl_destroy(&lwb->lwb_vdev_tree); | |
3661 | list_destroy(&lwb->lwb_waiters); | |
3662 | list_destroy(&lwb->lwb_itxs); | |
3663 | } | |
3664 | ||
34dc7c2f BB |
3665 | void |
3666 | zil_init(void) | |
3667 | { | |
3668 | zil_lwb_cache = kmem_cache_create("zil_lwb_cache", | |
1ce23dca PS |
3669 | sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0); |
3670 | ||
3671 | zil_zcw_cache = kmem_cache_create("zil_zcw_cache", | |
3672 | sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0); | |
b6ad9671 | 3673 | |
fb087146 AH |
3674 | zil_sums_init(&zil_sums_global); |
3675 | zil_kstats_global = kstat_create("zfs", 0, "zil", "misc", | |
d1d7e268 | 3676 | KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t), |
b6ad9671 ED |
3677 | KSTAT_FLAG_VIRTUAL); |
3678 | ||
fb087146 AH |
3679 | if (zil_kstats_global != NULL) { |
3680 | zil_kstats_global->ks_data = &zil_stats; | |
3681 | zil_kstats_global->ks_update = zil_kstats_global_update; | |
3682 | zil_kstats_global->ks_private = NULL; | |
3683 | kstat_install(zil_kstats_global); | |
b6ad9671 | 3684 | } |
34dc7c2f BB |
3685 | } |
3686 | ||
3687 | void | |
3688 | zil_fini(void) | |
3689 | { | |
1ce23dca | 3690 | kmem_cache_destroy(zil_zcw_cache); |
34dc7c2f | 3691 | kmem_cache_destroy(zil_lwb_cache); |
b6ad9671 | 3692 | |
fb087146 AH |
3693 | if (zil_kstats_global != NULL) { |
3694 | kstat_delete(zil_kstats_global); | |
3695 | zil_kstats_global = NULL; | |
b6ad9671 | 3696 | } |
fb087146 AH |
3697 | |
3698 | zil_sums_fini(&zil_sums_global); | |
34dc7c2f BB |
3699 | } |
3700 | ||
428870ff BB |
3701 | void |
3702 | zil_set_sync(zilog_t *zilog, uint64_t sync) | |
3703 | { | |
3704 | zilog->zl_sync = sync; | |
3705 | } | |
3706 | ||
3707 | void | |
3708 | zil_set_logbias(zilog_t *zilog, uint64_t logbias) | |
3709 | { | |
3710 | zilog->zl_logbias = logbias; | |
3711 | } | |
3712 | ||
34dc7c2f BB |
3713 | zilog_t * |
3714 | zil_alloc(objset_t *os, zil_header_t *zh_phys) | |
3715 | { | |
3716 | zilog_t *zilog; | |
3717 | ||
79c76d5b | 3718 | zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); |
34dc7c2f BB |
3719 | |
3720 | zilog->zl_header = zh_phys; | |
3721 | zilog->zl_os = os; | |
3722 | zilog->zl_spa = dmu_objset_spa(os); | |
3723 | zilog->zl_dmu_pool = dmu_objset_pool(os); | |
3724 | zilog->zl_destroy_txg = TXG_INITIAL - 1; | |
428870ff BB |
3725 | zilog->zl_logbias = dmu_objset_logbias(os); |
3726 | zilog->zl_sync = dmu_objset_syncprop(os); | |
1ce23dca PS |
3727 | zilog->zl_dirty_max_txg = 0; |
3728 | zilog->zl_last_lwb_opened = NULL; | |
3729 | zilog->zl_last_lwb_latency = 0; | |
b8738257 | 3730 | zilog->zl_max_block_size = zil_maxblocksize; |
34dc7c2f BB |
3731 | |
3732 | mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); | |
1b2b0aca | 3733 | mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL); |
152d6fda | 3734 | mutex_init(&zilog->zl_lwb_io_lock, NULL, MUTEX_DEFAULT, NULL); |
34dc7c2f | 3735 | |
1c27024e | 3736 | for (int i = 0; i < TXG_SIZE; i++) { |
572e2857 BB |
3737 | mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, |
3738 | MUTEX_DEFAULT, NULL); | |
3739 | } | |
34dc7c2f BB |
3740 | |
3741 | list_create(&zilog->zl_lwb_list, sizeof (lwb_t), | |
3742 | offsetof(lwb_t, lwb_node)); | |
3743 | ||
572e2857 BB |
3744 | list_create(&zilog->zl_itx_commit_list, sizeof (itx_t), |
3745 | offsetof(itx_t, itx_node)); | |
3746 | ||
34dc7c2f | 3747 | cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL); |
152d6fda | 3748 | cv_init(&zilog->zl_lwb_io_cv, NULL, CV_DEFAULT, NULL); |
34dc7c2f BB |
3749 | |
3750 | return (zilog); | |
3751 | } | |
3752 | ||
3753 | void | |
3754 | zil_free(zilog_t *zilog) | |
3755 | { | |
d6320ddb | 3756 | int i; |
34dc7c2f BB |
3757 | |
3758 | zilog->zl_stop_sync = 1; | |
3759 | ||
13fe0198 MA |
3760 | ASSERT0(zilog->zl_suspend); |
3761 | ASSERT0(zilog->zl_suspending); | |
3762 | ||
3e31d2b0 | 3763 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); |
34dc7c2f BB |
3764 | list_destroy(&zilog->zl_lwb_list); |
3765 | ||
572e2857 BB |
3766 | ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); |
3767 | list_destroy(&zilog->zl_itx_commit_list); | |
3768 | ||
d6320ddb | 3769 | for (i = 0; i < TXG_SIZE; i++) { |
572e2857 BB |
3770 | /* |
3771 | * It's possible for an itx to be generated that doesn't dirty | |
3772 | * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() | |
3773 | * callback to remove the entry. We remove those here. | |
3774 | * | |
3775 | * Also free up the ziltest itxs. | |
3776 | */ | |
3777 | if (zilog->zl_itxg[i].itxg_itxs) | |
3778 | zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs); | |
3779 | mutex_destroy(&zilog->zl_itxg[i].itxg_lock); | |
3780 | } | |
3781 | ||
1b2b0aca | 3782 | mutex_destroy(&zilog->zl_issuer_lock); |
34dc7c2f | 3783 | mutex_destroy(&zilog->zl_lock); |
152d6fda | 3784 | mutex_destroy(&zilog->zl_lwb_io_lock); |
34dc7c2f | 3785 | |
34dc7c2f | 3786 | cv_destroy(&zilog->zl_cv_suspend); |
152d6fda | 3787 | cv_destroy(&zilog->zl_lwb_io_cv); |
34dc7c2f BB |
3788 | |
3789 | kmem_free(zilog, sizeof (zilog_t)); | |
3790 | } | |
3791 | ||
34dc7c2f BB |
3792 | /* |
3793 | * Open an intent log. | |
3794 | */ | |
3795 | zilog_t * | |
fb087146 | 3796 | zil_open(objset_t *os, zil_get_data_t *get_data, zil_sums_t *zil_sums) |
34dc7c2f BB |
3797 | { |
3798 | zilog_t *zilog = dmu_objset_zil(os); | |
3799 | ||
1ce23dca PS |
3800 | ASSERT3P(zilog->zl_get_data, ==, NULL); |
3801 | ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL); | |
3e31d2b0 ES |
3802 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); |
3803 | ||
34dc7c2f | 3804 | zilog->zl_get_data = get_data; |
fb087146 | 3805 | zilog->zl_sums = zil_sums; |
34dc7c2f BB |
3806 | |
3807 | return (zilog); | |
3808 | } | |
3809 | ||
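/*
 * Hedged usage sketch (assumed caller shape, not from the original
 * source): zil_open() is paired with zil_close() around the lifetime
 * of a mounted objset, roughly:
 *
 *	zilog = zil_open(os, my_get_data, &my_zil_sums);
 *	/+ ...assign itxs, call zil_commit()... +/
 *	zil_close(zilog);
 *
 * where my_get_data is a hypothetical zil_get_data_t callback (the
 * ZPL uses zfs_get_data) and my_zil_sums feeds the per-dataset kstats.
 */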
3810 | /* | |
3811 | * Close an intent log. | |
3812 | */ | |
3813 | void | |
3814 | zil_close(zilog_t *zilog) | |
3815 | { | |
3e31d2b0 | 3816 | lwb_t *lwb; |
1ce23dca | 3817 | uint64_t txg; |
572e2857 | 3818 | |
1ce23dca PS |
3819 | if (!dmu_objset_is_snapshot(zilog->zl_os)) { |
3820 | zil_commit(zilog, 0); | |
3821 | } else { | |
895e0313 | 3822 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); |
1ce23dca PS |
3823 | ASSERT0(zilog->zl_dirty_max_txg); |
3824 | ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE); | |
3825 | } | |
572e2857 | 3826 | |
572e2857 | 3827 | mutex_enter(&zilog->zl_lock); |
3e31d2b0 | 3828 | lwb = list_tail(&zilog->zl_lwb_list); |
1ce23dca PS |
3829 | if (lwb == NULL) |
3830 | txg = zilog->zl_dirty_max_txg; | |
3831 | else | |
3832 | txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg); | |
572e2857 | 3833 | mutex_exit(&zilog->zl_lock); |
1ce23dca PS |
3834 | |
3835 | /* | |
152d6fda KJ |
3836 | * zl_lwb_max_issued_txg may be larger than lwb_max_txg. It depends |
3837 | * on the time when the dmu_tx transaction is assigned in | |
f63811f0 | 3838 | * zil_lwb_write_close(). |
152d6fda KJ |
3839 | */ |
3840 | mutex_enter(&zilog->zl_lwb_io_lock); | |
3841 | txg = MAX(zilog->zl_lwb_max_issued_txg, txg); | |
3842 | mutex_exit(&zilog->zl_lwb_io_lock); | |
3843 | ||
3844 | /* | |
3845 | * We need to use txg_wait_synced() to wait until that txg is synced. | |
3846 | * zil_sync() will guarantee all lwbs up to that txg have been | |
3847 | * written out, flushed, and cleaned. | |
1ce23dca PS |
3848 | */ |
3849 | if (txg != 0) | |
34dc7c2f | 3850 | txg_wait_synced(zilog->zl_dmu_pool, txg); |
55922e73 GW |
3851 | |
3852 | if (zilog_is_dirty(zilog)) | |
8e739b2c RE |
3853 | zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog, |
3854 | (u_longlong_t)txg); | |
50c957f7 | 3855 | if (txg < spa_freeze_txg(zilog->zl_spa)) |
55922e73 | 3856 | VERIFY(!zilog_is_dirty(zilog)); |
34dc7c2f | 3857 | |
34dc7c2f | 3858 | zilog->zl_get_data = NULL; |
3e31d2b0 ES |
3859 | |
3860 | /* | |
1ce23dca | 3861 | * We should have only one lwb left on the list; remove it now. |
3e31d2b0 ES |
3862 | */ |
3863 | mutex_enter(&zilog->zl_lock); | |
895e0313 | 3864 | lwb = list_remove_head(&zilog->zl_lwb_list); |
3e31d2b0 | 3865 | if (lwb != NULL) { |
895e0313 | 3866 | ASSERT(list_is_empty(&zilog->zl_lwb_list)); |
1ce23dca PS |
3867 | ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED); |
3868 | ||
3e31d2b0 | 3869 | zio_buf_free(lwb->lwb_buf, lwb->lwb_sz); |
1ce23dca | 3870 | zil_free_lwb(zilog, lwb); |
3e31d2b0 ES |
3871 | } |
3872 | mutex_exit(&zilog->zl_lock); | |
34dc7c2f BB |
3873 | } |
3874 | ||
a926aab9 | 3875 | static const char *suspend_tag = "zil suspending"; |
13fe0198 | 3876 | |
34dc7c2f BB |
3877 | /* |
3878 | * Suspend an intent log. While in suspended mode, we still honor | |
3879 | * synchronous semantics, but we rely on txg_wait_synced() to do it. | |
13fe0198 MA |
3880 | * On old version pools, we suspend the log briefly when taking a |
3881 | * snapshot so that it will have an empty intent log. | |
3882 | * | |
3883 | * Long holds are not really intended to be used the way we do here -- | |
3884 | * held for such a short time. A concurrent caller of dsl_dataset_long_held() | |
3885 | * could fail. Therefore we take pains to only put a long hold if it is | |
3886 | * actually necessary. Fortunately, it will only be necessary if the | |
3887 | * objset is currently mounted (or the ZVOL equivalent). In that case it | |
3888 | * will already have a long hold, so we are not really making things any worse. | |
3889 | * | |
3890 | * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or | |
3891 | * zvol_state_t), and use their mechanism to prevent their hold from being | |
3892 | * dropped (e.g. VFS_HOLD()). However, that would be even more pain for | |
3893 | * very little gain. | |
3894 | * | |
3895 | * if cookiep == NULL, this does both the suspend & resume. | |
3896 | * Otherwise, it returns with the dataset "long held", and the cookie | |
3897 | * should be passed into zil_resume(). | |
34dc7c2f BB |
3898 | */ |
3899 | int | |
13fe0198 | 3900 | zil_suspend(const char *osname, void **cookiep) |
34dc7c2f | 3901 | { |
13fe0198 MA |
3902 | objset_t *os; |
3903 | zilog_t *zilog; | |
3904 | const zil_header_t *zh; | |
3905 | int error; | |
3906 | ||
3907 | error = dmu_objset_hold(osname, suspend_tag, &os); | |
3908 | if (error != 0) | |
3909 | return (error); | |
3910 | zilog = dmu_objset_zil(os); | |
34dc7c2f BB |
3911 | |
3912 | mutex_enter(&zilog->zl_lock); | |
13fe0198 MA |
3913 | zh = zilog->zl_header; |
3914 | ||
9babb374 | 3915 | if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */ |
34dc7c2f | 3916 | mutex_exit(&zilog->zl_lock); |
13fe0198 | 3917 | dmu_objset_rele(os, suspend_tag); |
2e528b49 | 3918 | return (SET_ERROR(EBUSY)); |
34dc7c2f | 3919 | } |
13fe0198 MA |
3920 | |
3921 | /* | |
3922 | * Don't put a long hold in the cases where we can avoid it. This | |
3923 | * is when there is no cookie so we are doing a suspend & resume | |
3924 | * (i.e. called from zil_vdev_offline()), and there's nothing to do | |
3925 | * for the suspend because the log is already suspended, or there's no ZIL. | |
3926 | */ | |
3927 | if (cookiep == NULL && !zilog->zl_suspending && | |
3928 | (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) { | |
3929 | mutex_exit(&zilog->zl_lock); | |
3930 | dmu_objset_rele(os, suspend_tag); | |
3931 | return (0); | |
3932 | } | |
3933 | ||
3934 | dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag); | |
3935 | dsl_pool_rele(dmu_objset_pool(os), suspend_tag); | |
3936 | ||
3937 | zilog->zl_suspend++; | |
3938 | ||
3939 | if (zilog->zl_suspend > 1) { | |
34dc7c2f | 3940 | /* |
13fe0198 | 3941 | * Someone else is already suspending it. |
34dc7c2f BB |
3942 | * Just wait for them to finish. |
3943 | */ | |
13fe0198 | 3944 | |
34dc7c2f BB |
3945 | while (zilog->zl_suspending) |
3946 | cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock); | |
34dc7c2f | 3947 | mutex_exit(&zilog->zl_lock); |
13fe0198 MA |
3948 | |
3949 | if (cookiep == NULL) | |
3950 | zil_resume(os); | |
3951 | else | |
3952 | *cookiep = os; | |
3953 | return (0); | |
3954 | } | |
3955 | ||
3956 | /* | |
3957 | * If there is no pointer to an on-disk block, this ZIL must not | |
3958 | * be active (e.g. filesystem not mounted), so there's nothing | |
3959 | * to clean up. | |
3960 | */ | |
3961 | if (BP_IS_HOLE(&zh->zh_log)) { | |
3962 | ASSERT(cookiep != NULL); /* fast path already handled */ | |
3963 | ||
3964 | *cookiep = os; | |
3965 | mutex_exit(&zilog->zl_lock); | |
34dc7c2f BB |
3966 | return (0); |
3967 | } | |
13fe0198 | 3968 | |
4807c0ba TC |
3969 | /* |
3970 | * The ZIL has work to do. Ensure that the associated encryption | |
3971 | * key will remain mapped while we are committing the log by | |
3972 | * grabbing a reference to it. If the wrapping key isn't loaded, we | |
3973 | * have no choice but to return an error until it has been loaded. | |
3974 | */ | |
52ce99dd TC |
3975 | if (os->os_encrypted && |
3976 | dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) { | |
4807c0ba TC |
3977 | zilog->zl_suspend--; |
3978 | mutex_exit(&zilog->zl_lock); | |
3979 | dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); | |
3980 | dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); | |
2ffd89fc | 3981 | return (SET_ERROR(EACCES)); |
4807c0ba TC |
3982 | } |
3983 | ||
34dc7c2f BB |
3984 | zilog->zl_suspending = B_TRUE; |
3985 | mutex_exit(&zilog->zl_lock); | |
3986 | ||
2fe61a7e PS |
3987 | /* |
3988 | * We need to use zil_commit_impl to ensure we wait for all | |
3989 | * LWB_STATE_OPENED and LWB_STATE_ISSUED lwbs to be committed | |
3990 | * to disk before proceeding. If we used zil_commit instead, it | |
3991 | * would just call txg_wait_synced(), because zl_suspend is set. | |
3992 | * txg_wait_synced() doesn't wait for these lwbs to be |
900d09b2 | 3993 | * LWB_STATE_FLUSH_DONE before returning. |
2fe61a7e PS |
3994 | */ |
3995 | zil_commit_impl(zilog, 0); | |
3996 | ||
3997 | /* | |
900d09b2 PS |
3998 | * Now that we've ensured all lwbs are LWB_STATE_FLUSH_DONE, we |
3999 | * use txg_wait_synced() to ensure the data from the zilog has | |
4000 | * migrated to the main pool before calling zil_destroy(). | |
2fe61a7e PS |
4001 | */ |
4002 | txg_wait_synced(zilog->zl_dmu_pool, 0); | |
34dc7c2f BB |
4003 | |
4004 | zil_destroy(zilog, B_FALSE); | |
4005 | ||
4006 | mutex_enter(&zilog->zl_lock); | |
4007 | zilog->zl_suspending = B_FALSE; | |
4008 | cv_broadcast(&zilog->zl_cv_suspend); | |
4009 | mutex_exit(&zilog->zl_lock); | |
4010 | ||
52ce99dd TC |
4011 | if (os->os_encrypted) |
4012 | dsl_dataset_remove_key_mapping(dmu_objset_ds(os)); | |
4807c0ba | 4013 | |
13fe0198 MA |
4014 | if (cookiep == NULL) |
4015 | zil_resume(os); | |
4016 | else | |
4017 | *cookiep = os; | |
34dc7c2f BB |
4018 | return (0); |
4019 | } | |
4020 | ||
4021 | void | |
13fe0198 | 4022 | zil_resume(void *cookie) |
34dc7c2f | 4023 | { |
13fe0198 MA |
4024 | objset_t *os = cookie; |
4025 | zilog_t *zilog = dmu_objset_zil(os); | |
4026 | ||
34dc7c2f BB |
4027 | mutex_enter(&zilog->zl_lock); |
4028 | ASSERT(zilog->zl_suspend != 0); | |
4029 | zilog->zl_suspend--; | |
4030 | mutex_exit(&zilog->zl_lock); | |
13fe0198 MA |
4031 | dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag); |
4032 | dsl_dataset_rele(dmu_objset_ds(os), suspend_tag); | |
34dc7c2f BB |
4033 | } |
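/*
 * Illustrative sketch (not part of the upstream file): how a caller is
 * expected to pair zil_suspend() and zil_resume() through the cookie.
 * The helper name and osname are placeholders.
 */
static int
example_zil_suspend_resume(const char *osname)
{
	void *cookie;
	int error;

	/* On success, returns with the dataset "long held". */
	error = zil_suspend(osname, &cookie);
	if (error != 0)
		return (error);

	/* ... work that requires an empty, quiesced intent log ... */

	/* Drops the long hold taken by zil_suspend(). */
	zil_resume(cookie);
	return (0);
}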
4034 | ||
4035 | typedef struct zil_replay_arg { | |
18168da7 | 4036 | zil_replay_func_t *const *zr_replay; |
34dc7c2f | 4037 | void *zr_arg; |
34dc7c2f | 4038 | boolean_t zr_byteswap; |
428870ff | 4039 | char *zr_lr; |
34dc7c2f BB |
4040 | } zil_replay_arg_t; |
4041 | ||
428870ff | 4042 | static int |
61868bb1 | 4043 | zil_replay_error(zilog_t *zilog, const lr_t *lr, int error) |
428870ff | 4044 | { |
eca7b760 | 4045 | char name[ZFS_MAX_DATASET_NAME_LEN]; |
428870ff BB |
4046 | |
4047 | zilog->zl_replaying_seq--; /* didn't actually replay this one */ | |
4048 | ||
4049 | dmu_objset_name(zilog->zl_os, name); | |
4050 | ||
4051 | cmn_err(CE_WARN, "ZFS replay transaction error %d, " | |
4052 | "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name, | |
4053 | (u_longlong_t)lr->lrc_seq, | |
4054 | (u_longlong_t)(lr->lrc_txtype & ~TX_CI), | |
4055 | (lr->lrc_txtype & TX_CI) ? "CI" : ""); | |
4056 | ||
4057 | return (error); | |
4058 | } | |
4059 | ||
4060 | static int | |
61868bb1 CS |
4061 | zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra, |
4062 | uint64_t claim_txg) | |
34dc7c2f BB |
4063 | { |
4064 | zil_replay_arg_t *zr = zra; | |
4065 | const zil_header_t *zh = zilog->zl_header; | |
4066 | uint64_t reclen = lr->lrc_reclen; | |
4067 | uint64_t txtype = lr->lrc_txtype; | |
428870ff | 4068 | int error = 0; |
34dc7c2f | 4069 | |
428870ff | 4070 | zilog->zl_replaying_seq = lr->lrc_seq; |
34dc7c2f BB |
4071 | |
4072 | if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */ | |
428870ff BB |
4073 | return (0); |
4074 | ||
4075 | if (lr->lrc_txg < claim_txg) /* already committed */ | |
4076 | return (0); | |
34dc7c2f BB |
4077 | |
4078 | /* Strip case-insensitive bit, still present in log record */ | |
4079 | txtype &= ~TX_CI; | |
4080 | ||
428870ff BB |
4081 | if (txtype == 0 || txtype >= TX_MAX_TYPE) |
4082 | return (zil_replay_error(zilog, lr, EINVAL)); | |
4083 | ||
4084 | /* | |
4085 | * If this record type can be logged out of order, the object | |
4086 | * (lr_foid) may no longer exist. That's legitimate, not an error. | |
4087 | */ | |
4088 | if (TX_OOO(txtype)) { | |
4089 | error = dmu_object_info(zilog->zl_os, | |
50c957f7 | 4090 | LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL); |
428870ff BB |
4091 | if (error == ENOENT || error == EEXIST) |
4092 | return (0); | |
fb5f0bc8 BB |
4093 | } |
4094 | ||
34dc7c2f BB |
4095 | /* |
4096 | * Make a copy of the data so we can revise and extend it. | |
4097 | */ | |
861166b0 | 4098 | memcpy(zr->zr_lr, lr, reclen); |
428870ff BB |
4099 | |
4100 | /* | |
4101 | * If this is a TX_WRITE with a blkptr, suck in the data. | |
4102 | */ | |
4103 | if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) { | |
4104 | error = zil_read_log_data(zilog, (lr_write_t *)lr, | |
4105 | zr->zr_lr + reclen); | |
13fe0198 | 4106 | if (error != 0) |
428870ff BB |
4107 | return (zil_replay_error(zilog, lr, error)); |
4108 | } | |
34dc7c2f BB |
4109 | |
4110 | /* | |
4111 | * The log block containing this lr may have been byteswapped | |
4112 | * so that we can easily examine common fields like lrc_txtype. | |
428870ff | 4113 | * However, the log is a mix of different record types, and only the |
34dc7c2f BB |
4114 | * replay vectors know how to byteswap their records. Therefore, if |
4115 | * the lr was byteswapped, undo it before invoking the replay vector. | |
4116 | */ | |
4117 | if (zr->zr_byteswap) | |
428870ff | 4118 | byteswap_uint64_array(zr->zr_lr, reclen); |
34dc7c2f BB |
4119 | |
4120 | /* | |
4121 | * We must now do two things atomically: replay this log record, | |
fb5f0bc8 BB |
4122 | * and update the log header sequence number to reflect the fact that |
4123 | * we did so. At the end of each replay function the sequence number | |
4124 | * is updated if we are in replay mode. | |
34dc7c2f | 4125 | */ |
428870ff | 4126 | error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap); |
13fe0198 | 4127 | if (error != 0) { |
34dc7c2f BB |
4128 | /* |
4129 | * The DMU's dnode layer doesn't see removes until the txg | |
4130 | * commits, so a subsequent claim can spuriously fail with | |
fb5f0bc8 | 4131 | * EEXIST. Therefore, if we receive any error, we try syncing out |
428870ff BB |
4132 | * any removes and then retry the transaction. Note that we |
4133 | * specify B_FALSE for byteswap now, so we don't do it twice. | |
34dc7c2f | 4134 | */ |
428870ff BB |
4135 | txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0); |
4136 | error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE); | |
13fe0198 | 4137 | if (error != 0) |
428870ff | 4138 | return (zil_replay_error(zilog, lr, error)); |
34dc7c2f | 4139 | } |
428870ff | 4140 | return (0); |
34dc7c2f BB |
4141 | } |
4142 | ||
428870ff | 4143 | static int |
61868bb1 | 4144 | zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg) |
34dc7c2f | 4145 | { |
14e4e3cb AZ |
4146 | (void) bp, (void) arg, (void) claim_txg; |
4147 | ||
34dc7c2f | 4148 | zilog->zl_replay_blks++; |
428870ff BB |
4149 | |
4150 | return (0); | |
34dc7c2f BB |
4151 | } |
4152 | ||
4153 | /* | |
4154 | * If this dataset has a non-empty intent log, replay it and destroy it. | |
e197bb24 | 4155 | * Return B_TRUE if there were any entries to replay. |
34dc7c2f | 4156 | */ |
e197bb24 | 4157 | boolean_t |
18168da7 AZ |
4158 | zil_replay(objset_t *os, void *arg, |
4159 | zil_replay_func_t *const replay_func[TX_MAX_TYPE]) | |
34dc7c2f BB |
4160 | { |
4161 | zilog_t *zilog = dmu_objset_zil(os); | |
4162 | const zil_header_t *zh = zilog->zl_header; | |
4163 | zil_replay_arg_t zr; | |
4164 | ||
9babb374 | 4165 | if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) { |
e197bb24 | 4166 | return (zil_destroy(zilog, B_TRUE)); |
34dc7c2f BB |
4167 | } |
4168 | ||
34dc7c2f BB |
4169 | zr.zr_replay = replay_func; |
4170 | zr.zr_arg = arg; | |
34dc7c2f | 4171 | zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log); |
79c76d5b | 4172 | zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP); |
34dc7c2f BB |
4173 | |
4174 | /* | |
4175 | * Wait for in-progress removes to sync before starting replay. | |
4176 | */ | |
4177 | txg_wait_synced(zilog->zl_dmu_pool, 0); | |
4178 | ||
fb5f0bc8 | 4179 | zilog->zl_replay = B_TRUE; |
428870ff | 4180 | zilog->zl_replay_time = ddi_get_lbolt(); |
34dc7c2f BB |
4181 | ASSERT(zilog->zl_replay_blks == 0); |
4182 | (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr, | |
b5256303 | 4183 | zh->zh_claim_txg, B_TRUE); |
00b46022 | 4184 | vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE); |
34dc7c2f BB |
4185 | |
4186 | zil_destroy(zilog, B_FALSE); | |
4187 | txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg); | |
fb5f0bc8 | 4188 | zilog->zl_replay = B_FALSE; |
e197bb24 AS |
4189 | |
4190 | return (B_TRUE); | |
34dc7c2f BB |
4191 | } |
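/*
 * Illustrative sketch (not part of the upstream file): the general shape
 * of a replay vector table passed to zil_replay() at mount time. The
 * function and table names here are placeholders; the real vectors live
 * in the filesystem replay code (e.g. zfs_replay.c). Each entry receives
 * the caller's arg, the log record, and a flag saying whether the record
 * still needs its type-specific byteswap.
 */
static int
example_replay_create(void *arg, void *lr, boolean_t byteswap)
{
	/* Type-specific byteswap, if the log block was byteswapped. */
	if (byteswap)
		byteswap_uint64_array(lr, sizeof (lr_create_t));

	/* ... re-apply the create against the objset held in arg ... */
	return (0);
}

static zil_replay_func_t *const example_replay_vector[TX_MAX_TYPE] = {
	NULL,			/* txtype 0 is invalid */
	example_replay_create,	/* TX_CREATE */
	/* ... one entry per TX_* type; unused slots stay NULL ... */
};

/* At mount: (void) zil_replay(os, fs_private_data, example_replay_vector); */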
4192 | ||
428870ff BB |
4193 | boolean_t |
4194 | zil_replaying(zilog_t *zilog, dmu_tx_t *tx) | |
34dc7c2f | 4195 | { |
428870ff BB |
4196 | if (zilog->zl_sync == ZFS_SYNC_DISABLED) |
4197 | return (B_TRUE); | |
34dc7c2f | 4198 | |
428870ff BB |
4199 | if (zilog->zl_replay) { |
4200 | dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); | |
4201 | zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = | |
4202 | zilog->zl_replaying_seq; | |
4203 | return (B_TRUE); | |
34dc7c2f BB |
4204 | } |
4205 | ||
428870ff | 4206 | return (B_FALSE); |
34dc7c2f | 4207 | } |
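/*
 * Illustrative sketch (not part of the upstream file): how the logging
 * paths are expected to consult zil_replaying(). When it returns B_TRUE
 * (replay in progress, or intent logging disabled), the operation must
 * not be logged again, so the caller skips creating an itx. The helper
 * name is a placeholder.
 */
static void
example_log_op(zilog_t *zilog, dmu_tx_t *tx)
{
	if (zil_replaying(zilog, tx))
		return;

	/* ... zil_itx_create() / zil_itx_assign() as usual ... */
}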
9babb374 | 4208 | |
9babb374 | 4209 | int |
a1d477c2 | 4210 | zil_reset(const char *osname, void *arg) |
9babb374 | 4211 | { |
14e4e3cb | 4212 | (void) arg; |
9babb374 | 4213 | |
14e4e3cb | 4214 | int error = zil_suspend(osname, NULL); |
2ffd89fc PZ |
4215 | /* EACCES means crypto key not loaded */ |
4216 | if ((error == EACCES) || (error == EBUSY)) | |
4217 | return (SET_ERROR(error)); | |
13fe0198 | 4218 | if (error != 0) |
2e528b49 | 4219 | return (SET_ERROR(EEXIST)); |
13fe0198 | 4220 | return (0); |
9babb374 | 4221 | } |
c409e464 | 4222 | |
0f699108 AZ |
4223 | EXPORT_SYMBOL(zil_alloc); |
4224 | EXPORT_SYMBOL(zil_free); | |
4225 | EXPORT_SYMBOL(zil_open); | |
4226 | EXPORT_SYMBOL(zil_close); | |
4227 | EXPORT_SYMBOL(zil_replay); | |
4228 | EXPORT_SYMBOL(zil_replaying); | |
4229 | EXPORT_SYMBOL(zil_destroy); | |
4230 | EXPORT_SYMBOL(zil_destroy_sync); | |
4231 | EXPORT_SYMBOL(zil_itx_create); | |
4232 | EXPORT_SYMBOL(zil_itx_destroy); | |
4233 | EXPORT_SYMBOL(zil_itx_assign); | |
4234 | EXPORT_SYMBOL(zil_commit); | |
0f699108 AZ |
4235 | EXPORT_SYMBOL(zil_claim); |
4236 | EXPORT_SYMBOL(zil_check_log_chain); | |
4237 | EXPORT_SYMBOL(zil_sync); | |
4238 | EXPORT_SYMBOL(zil_clean); | |
4239 | EXPORT_SYMBOL(zil_suspend); | |
4240 | EXPORT_SYMBOL(zil_resume); | |
1ce23dca | 4241 | EXPORT_SYMBOL(zil_lwb_add_block); |
0f699108 AZ |
4242 | EXPORT_SYMBOL(zil_bp_tree_add); |
4243 | EXPORT_SYMBOL(zil_set_sync); | |
4244 | EXPORT_SYMBOL(zil_set_logbias); | |
fb087146 AH |
4245 | EXPORT_SYMBOL(zil_sums_init); |
4246 | EXPORT_SYMBOL(zil_sums_fini); | |
4247 | EXPORT_SYMBOL(zil_kstat_values_update); | |
0f699108 | 4248 | |
fdc2d303 | 4249 | ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, UINT, ZMOD_RW, |
03fdcb9a | 4250 | "ZIL block open timeout percentage"); |
2fe61a7e | 4251 | |
0f740a4f AM |
4252 | ZFS_MODULE_PARAM(zfs_zil, zil_, min_commit_timeout, U64, ZMOD_RW, |
4253 | "Minimum delay we care for ZIL block commit"); | |
4254 | ||
03fdcb9a MM |
4255 | ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW, |
4256 | "Disable intent logging replay"); | |
c409e464 | 4257 | |
03fdcb9a MM |
4258 | ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW, |
4259 | "Disable ZIL cache flushes"); | |
ee191e80 | 4260 | |
ab8d9c17 | 4261 | ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, U64, ZMOD_RW, |
03fdcb9a | 4262 | "Limit in bytes of slog sync writes per commit"); |
b8738257 | 4263 | |
fdc2d303 | 4264 | ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW, |
03fdcb9a | 4265 | "Limit in bytes of ZIL log block size"); |
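/*
 * Note (not part of the upstream file): on Linux, these tunables surface
 * under /sys/module/zfs/parameters/ with the prefix shown above (e.g.
 * zfs_commit_timeout_pct, zil_nocacheflush) and can also be set as
 * module options at load time.
 */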