module/zfs/zio.c
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2022 by Delphix. All rights reserved.
24 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2017, Intel Corporation.
26 * Copyright (c) 2019, 2023, 2024, Klara Inc.
27 * Copyright (c) 2019, Allan Jude
28 * Copyright (c) 2021, Datto, Inc.
29 */
30
31#include <sys/sysmacros.h>
32#include <sys/zfs_context.h>
33#include <sys/fm/fs/zfs.h>
34#include <sys/spa.h>
35#include <sys/txg.h>
36#include <sys/spa_impl.h>
37#include <sys/vdev_impl.h>
38#include <sys/vdev_trim.h>
39#include <sys/zio_impl.h>
40#include <sys/zio_compress.h>
41#include <sys/zio_checksum.h>
42#include <sys/dmu_objset.h>
43#include <sys/arc.h>
44#include <sys/brt.h>
45#include <sys/ddt.h>
46#include <sys/blkptr.h>
47#include <sys/zfeature.h>
48#include <sys/dsl_scan.h>
49#include <sys/metaslab_impl.h>
50#include <sys/time.h>
51#include <sys/trace_zfs.h>
52#include <sys/abd.h>
53#include <sys/dsl_crypt.h>
54#include <cityhash.h>
55
56/*
57 * ==========================================================================
58 * I/O type descriptions
59 * ==========================================================================
60 */
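/*
 * Human-readable names for each zio type; these strings are used when
 * naming the per-type I/O taskq threads.
 */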
61const char *const zio_type_name[ZIO_TYPES] = {
62 /*
63 * Note: Linux kernel thread name length is limited
64 * so these names will differ from upstream open zfs.
65 */
66 "z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_flush", "z_trim"
67};
68
69int zio_dva_throttle_enabled = B_TRUE;
70static int zio_deadman_log_all = B_FALSE;
71
72/*
73 * ==========================================================================
74 * I/O kmem caches
75 * ==========================================================================
76 */
77static kmem_cache_t *zio_cache;
78static kmem_cache_t *zio_link_cache;
79kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
80kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
81#if defined(ZFS_DEBUG) && !defined(_KERNEL)
82static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
83static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
84#endif
85
86/* Mark IOs as "slow" if they take longer than 30 seconds */
87static uint_t zio_slow_io_ms = (30 * MILLISEC);
88
89#define BP_SPANB(indblkshift, level) \
90 (((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
91#define COMPARE_META_LEVEL 0x80000000ul
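/*
 * BP_SPANB(indblkshift, level) is the number of level-0 blocks spanned by a
 * single block pointer at the given indirection level. COMPARE_META_LEVEL is
 * a sentinel level value used by the zbookmark comparison helpers.
 */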
92/*
93 * The following actions directly affect the spa's sync-to-convergence logic.
94 * The values below define the sync pass when we start performing the action.
95 * Care should be taken when changing these values as they directly impact
96 * spa_sync() performance. Tuning these values may introduce subtle performance
97 * pathologies and should only be done in the context of performance analysis.
98 * These tunables will eventually be removed and replaced with #defines once
99 * enough analysis has been done to determine optimal values.
100 *
101 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
102 * regular blocks are not deferred.
103 *
104 * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
105 * compression (including of metadata). In practice, we don't have this
106 * many sync passes, so this has no effect.
107 *
108 * The original intent was that disabling compression would help the sync
109 * passes to converge. However, in practice disabling compression increases
110 * the average number of sync passes, because when we turn compression off, a
111 * lot of blocks' sizes will change and thus we have to re-allocate (not
112 * overwrite) them. It also increases the number of 128KB allocations (e.g.
113 * for indirect blocks and spacemaps) because these will not be compressed.
114 * The 128K allocations are especially detrimental to performance on highly
115 * fragmented systems, which may have very few free segments of this size,
116 * and may need to load new metaslabs to satisfy 128K allocations.
117 */
118
119/* defer frees starting in this pass */
120uint_t zfs_sync_pass_deferred_free = 2;
121
122/* don't compress starting in this pass */
123static uint_t zfs_sync_pass_dont_compress = 8;
124
125/* rewrite new bps starting in this pass */
126static uint_t zfs_sync_pass_rewrite = 2;
127
128/*
129 * An allocating zio is one that either currently has the DVA allocate
130 * stage set or will have it later in its lifetime.
131 */
132#define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
133
134/*
135 * Enable smaller cores by excluding metadata
136 * allocations as well.
137 */
138int zio_exclude_metadata = 0;
139static int zio_requeue_io_start_cut_in_line = 1;
140
141#ifdef ZFS_DEBUG
142static const int zio_buf_debug_limit = 16384;
143#else
144static const int zio_buf_debug_limit = 0;
145#endif
146
147static inline void __zio_execute(zio_t *zio);
148
149static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
150
151void
152zio_init(void)
153{
154 size_t c;
155
156 zio_cache = kmem_cache_create("zio_cache",
157 sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
158 zio_link_cache = kmem_cache_create("zio_link_cache",
159 sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
160
161 for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
162 size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
163 size_t align, cflags, data_cflags;
164 char name[32];
165
166 /*
167 * Create cache for each half-power of 2 size, starting from
168 * SPA_MINBLOCKSIZE. It should give us memory space efficiency
169 * of ~7/8, sufficient for transient allocations mostly using
170 * these caches.
171 */
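/*
 * For example, with a 512-byte SPA_MINBLOCKSIZE this creates caches of
 * 512, 1024, 1536, 2048, 3072, 4096, ... bytes: a 3072-byte size passes
 * the check below (p2 = 2048 and 3072 is a multiple of 1024), while a
 * 2560-byte size is skipped and later falls back to the next larger cache.
 */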
172 size_t p2 = size;
173 while (!ISP2(p2))
174 p2 &= p2 - 1;
175 if (!IS_P2ALIGNED(size, p2 / 2))
176 continue;
177
178#ifndef _KERNEL
179 /*
180 * If we are using watchpoints, put each buffer on its own page,
181 * to eliminate the performance overhead of trapping to the
182 * kernel when modifying a non-watched buffer that shares the
183 * page with a watched buffer.
184 */
185 if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
186 continue;
187#endif
188
189 if (IS_P2ALIGNED(size, PAGESIZE))
190 align = PAGESIZE;
191 else
192 align = 1 << (highbit64(size ^ (size - 1)) - 1);
193
194 cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
195 KMC_NODEBUG : 0;
196 data_cflags = KMC_NODEBUG;
197 if (cflags == data_cflags) {
198 /*
199 * Resulting kmem caches would be identical.
200 * Save memory by creating only one.
201 */
202 (void) snprintf(name, sizeof (name),
203 "zio_buf_comb_%lu", (ulong_t)size);
204 zio_buf_cache[c] = kmem_cache_create(name, size, align,
205 NULL, NULL, NULL, NULL, NULL, cflags);
206 zio_data_buf_cache[c] = zio_buf_cache[c];
207 continue;
208 }
209 (void) snprintf(name, sizeof (name), "zio_buf_%lu",
210 (ulong_t)size);
211 zio_buf_cache[c] = kmem_cache_create(name, size, align,
212 NULL, NULL, NULL, NULL, NULL, cflags);
213
214 (void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
215 (ulong_t)size);
216 zio_data_buf_cache[c] = kmem_cache_create(name, size, align,
217 NULL, NULL, NULL, NULL, NULL, data_cflags);
218 }
219
220 while (--c != 0) {
221 ASSERT(zio_buf_cache[c] != NULL);
222 if (zio_buf_cache[c - 1] == NULL)
223 zio_buf_cache[c - 1] = zio_buf_cache[c];
224
225 ASSERT(zio_data_buf_cache[c] != NULL);
226 if (zio_data_buf_cache[c - 1] == NULL)
227 zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
228 }
229
230 zio_inject_init();
231
232 lz4_init();
233}
234
235void
236zio_fini(void)
237{
238 size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
239
240#if defined(ZFS_DEBUG) && !defined(_KERNEL)
241 for (size_t i = 0; i < n; i++) {
242 if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
243 (void) printf("zio_fini: [%d] %llu != %llu\n",
244 (int)((i + 1) << SPA_MINBLOCKSHIFT),
245 (long long unsigned)zio_buf_cache_allocs[i],
246 (long long unsigned)zio_buf_cache_frees[i]);
247 }
248#endif
249
250 /*
251 * The same kmem cache can show up multiple times in both zio_buf_cache
252 * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
253 * sort it out.
254 */
255 for (size_t i = 0; i < n; i++) {
256 kmem_cache_t *cache = zio_buf_cache[i];
257 if (cache == NULL)
258 continue;
259 for (size_t j = i; j < n; j++) {
260 if (cache == zio_buf_cache[j])
261 zio_buf_cache[j] = NULL;
262 if (cache == zio_data_buf_cache[j])
263 zio_data_buf_cache[j] = NULL;
264 }
265 kmem_cache_destroy(cache);
266 }
267
268 for (size_t i = 0; i < n; i++) {
269 kmem_cache_t *cache = zio_data_buf_cache[i];
270 if (cache == NULL)
271 continue;
272 for (size_t j = i; j < n; j++) {
273 if (cache == zio_data_buf_cache[j])
274 zio_data_buf_cache[j] = NULL;
275 }
276 kmem_cache_destroy(cache);
277 }
278
279 for (size_t i = 0; i < n; i++) {
280 VERIFY3P(zio_buf_cache[i], ==, NULL);
281 VERIFY3P(zio_data_buf_cache[i], ==, NULL);
282 }
283
284 kmem_cache_destroy(zio_link_cache);
285 kmem_cache_destroy(zio_cache);
286
287 zio_inject_fini();
288
289 lz4_fini();
290}
291
292/*
293 * ==========================================================================
294 * Allocate and free I/O buffers
295 * ==========================================================================
296 */
297
298#ifdef ZFS_DEBUG
299static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;
300#endif
301
302/*
303 * Use empty space after the buffer to detect overflows.
304 *
305 * Since zio_init() creates kmem caches only for certain set of buffer sizes,
306 * allocations of different sizes may have some unused space after the data.
307 * Filling part of that space with a known pattern on allocation and checking
308 * it on free should allow us to detect some buffer overflows.
309 */
310static void
311zio_buf_put_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
312{
313#ifdef ZFS_DEBUG
314 size_t off = P2ROUNDUP(size, sizeof (ulong_t));
315 ulong_t *canary = p + off / sizeof (ulong_t);
316 size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
317 if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
318 cache[c] == cache[c + 1])
319 asize = (c + 2) << SPA_MINBLOCKSHIFT;
320 for (; off < asize; canary++, off += sizeof (ulong_t))
321 *canary = zio_buf_canary;
322#endif
323}
324
325static void
326zio_buf_check_canary(ulong_t *p, size_t size, kmem_cache_t **cache, size_t c)
327{
328#ifdef ZFS_DEBUG
329 size_t off = P2ROUNDUP(size, sizeof (ulong_t));
330 ulong_t *canary = p + off / sizeof (ulong_t);
331 size_t asize = (c + 1) << SPA_MINBLOCKSHIFT;
332 if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
333 cache[c] == cache[c + 1])
334 asize = (c + 2) << SPA_MINBLOCKSHIFT;
335 for (; off < asize; canary++, off += sizeof (ulong_t)) {
336 if (unlikely(*canary != zio_buf_canary)) {
337 PANIC("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx",
338 p, size, (canary - p) * sizeof (ulong_t),
339 *canary, zio_buf_canary);
340 }
341 }
342#endif
343}
344
345/*
346 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
347 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
348 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
349 * excess / transient data in-core during a crashdump.
350 */
351void *
352zio_buf_alloc(size_t size)
353{
354 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
355
356 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
357#if defined(ZFS_DEBUG) && !defined(_KERNEL)
358 atomic_add_64(&zio_buf_cache_allocs[c], 1);
359#endif
360
361 void *p = kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE);
362 zio_buf_put_canary(p, size, zio_buf_cache, c);
363 return (p);
364}
365
366/*
367 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
368 * crashdump if the kernel panics. This exists so that we will limit the amount
369 * of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount
370 * of kernel heap dumped to disk when the kernel panics)
371 */
372void *
373zio_data_buf_alloc(size_t size)
374{
375 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
376
377 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
378
379 void *p = kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE);
380 zio_buf_put_canary(p, size, zio_data_buf_cache, c);
381 return (p);
34dc7c2f
BB
382}
383
384void
385zio_buf_free(void *buf, size_t size)
386{
387 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
388
389 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
390#if defined(ZFS_DEBUG) && !defined(_KERNEL)
391 atomic_add_64(&zio_buf_cache_frees[c], 1);
392#endif
393
394 zio_buf_check_canary(buf, size, zio_buf_cache, c);
395 kmem_cache_free(zio_buf_cache[c], buf);
396}
397
398void
399zio_data_buf_free(void *buf, size_t size)
400{
401 size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
402
403 VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
404
405 zio_buf_check_canary(buf, size, zio_data_buf_cache, c);
406 kmem_cache_free(zio_data_buf_cache[c], buf);
407}
408
409static void
410zio_abd_free(void *abd, size_t size)
411{
412 (void) size;
413 abd_free((abd_t *)abd);
414}
415
416/*
417 * ==========================================================================
418 * Push and pop I/O transform buffers
419 * ==========================================================================
420 */
421void
422zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
423 zio_transform_func_t *transform)
424{
425 zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
426
427 zt->zt_orig_abd = zio->io_abd;
428 zt->zt_orig_size = zio->io_size;
429 zt->zt_bufsize = bufsize;
430 zt->zt_transform = transform;
431
432 zt->zt_next = zio->io_transform_stack;
433 zio->io_transform_stack = zt;
434
435 zio->io_abd = data;
436 zio->io_size = size;
437}
438
439void
440zio_pop_transforms(zio_t *zio)
441{
442 zio_transform_t *zt;
443
444 while ((zt = zio->io_transform_stack) != NULL) {
445 if (zt->zt_transform != NULL)
446 zt->zt_transform(zio,
447 zt->zt_orig_abd, zt->zt_orig_size);
448
449 if (zt->zt_bufsize != 0)
450 abd_free(zio->io_abd);
451
452 zio->io_abd = zt->zt_orig_abd;
453 zio->io_size = zt->zt_orig_size;
454 zio->io_transform_stack = zt->zt_next;
455
456 kmem_free(zt, sizeof (zio_transform_t));
457 }
458}
459
460/*
461 * ==========================================================================
462 * I/O transform callbacks for subblocks, decompression, and decryption
463 * ==========================================================================
464 */
465static void
466zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
467{
468 ASSERT(zio->io_size > size);
469
470 if (zio->io_type == ZIO_TYPE_READ)
471 abd_copy(data, zio->io_abd, size);
472}
473
474static void
475zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
476{
477 if (zio->io_error == 0) {
478 void *tmp = abd_borrow_buf(data, size);
479 int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
480 zio->io_abd, tmp, zio->io_size, size,
481 &zio->io_prop.zp_complevel);
482 abd_return_buf_copy(data, tmp, size);
483
484 if (zio_injection_enabled && ret == 0)
485 ret = zio_handle_fault_injection(zio, EINVAL);
486
487 if (ret != 0)
488 zio->io_error = SET_ERROR(EIO);
489 }
490}
491
492static void
493zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
494{
495 int ret;
496 void *tmp;
497 blkptr_t *bp = zio->io_bp;
498 spa_t *spa = zio->io_spa;
499 uint64_t dsobj = zio->io_bookmark.zb_objset;
500 uint64_t lsize = BP_GET_LSIZE(bp);
501 dmu_object_type_t ot = BP_GET_TYPE(bp);
502 uint8_t salt[ZIO_DATA_SALT_LEN];
503 uint8_t iv[ZIO_DATA_IV_LEN];
504 uint8_t mac[ZIO_DATA_MAC_LEN];
505 boolean_t no_crypt = B_FALSE;
506
507 ASSERT(BP_USES_CRYPT(bp));
508 ASSERT3U(size, !=, 0);
509
510 if (zio->io_error != 0)
511 return;
512
513 /*
514 * Verify the cksum of MACs stored in an indirect bp. It will always
515 * be possible to verify this since it does not require an encryption
516 * key.
517 */
518 if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
519 zio_crypt_decode_mac_bp(bp, mac);
520
521 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
522 /*
523 * We haven't decompressed the data yet, but
524 * zio_crypt_do_indirect_mac_checksum() requires
525 * decompressed data to be able to parse out the MACs
526 * from the indirect block. We decompress it now and
527 * throw away the result after we are finished.
528 */
529 tmp = zio_buf_alloc(lsize);
530 ret = zio_decompress_data(BP_GET_COMPRESS(bp),
531 zio->io_abd, tmp, zio->io_size, lsize,
532 &zio->io_prop.zp_complevel);
533 if (ret != 0) {
534 ret = SET_ERROR(EIO);
535 goto error;
536 }
537 ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
538 tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
539 zio_buf_free(tmp, lsize);
540 } else {
541 ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
542 zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
543 }
544 abd_copy(data, zio->io_abd, size);
545
546 if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
547 ret = zio_handle_decrypt_injection(spa,
548 &zio->io_bookmark, ot, ECKSUM);
549 }
550 if (ret != 0)
551 goto error;
552
553 return;
554 }
555
556 /*
557 * If this is an authenticated block, just check the MAC. It would be
558 * nice to separate this out into its own flag, but when this was done,
559 * we had run out of bits in what is now zio_flag_t. Future cleanup
560 * could make this a flag bit.
561 */
562 if (BP_IS_AUTHENTICATED(bp)) {
563 if (ot == DMU_OT_OBJSET) {
564 ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
565 dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
566 } else {
567 zio_crypt_decode_mac_bp(bp, mac);
568 ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
569 zio->io_abd, size, mac);
570 if (zio_injection_enabled && ret == 0) {
571 ret = zio_handle_decrypt_injection(spa,
572 &zio->io_bookmark, ot, ECKSUM);
573 }
574 }
575 abd_copy(data, zio->io_abd, size);
576
577 if (ret != 0)
578 goto error;
579
580 return;
581 }
582
583 zio_crypt_decode_params_bp(bp, salt, iv);
584
585 if (ot == DMU_OT_INTENT_LOG) {
586 tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
587 zio_crypt_decode_mac_zil(tmp, mac);
588 abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
589 } else {
590 zio_crypt_decode_mac_bp(bp, mac);
591 }
592
593 ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
594 BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
595 zio->io_abd, &no_crypt);
596 if (no_crypt)
597 abd_copy(data, zio->io_abd, size);
598
599 if (ret != 0)
600 goto error;
601
602 return;
603
604error:
605 /* assert that the key was found unless this was speculative */
606 ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
607
608 /*
609 * If there was a decryption / authentication error return EIO as
610 * the io_error. If this was not a speculative zio, create an ereport.
611 */
612 if (ret == ECKSUM) {
613 zio->io_error = SET_ERROR(EIO);
614 if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
615 spa_log_error(spa, &zio->io_bookmark,
616 BP_GET_LOGICAL_BIRTH(zio->io_bp));
617 (void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
618 spa, NULL, &zio->io_bookmark, zio, 0);
619 }
620 } else {
621 zio->io_error = ret;
622 }
623}
624
625/*
626 * ==========================================================================
627 * I/O parent/child relationships and pipeline interlocks
628 * ==========================================================================
629 */
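/*
 * Walk a zio's parent (or child) links. *zl is an iterator cursor that the
 * caller initializes to NULL; each call returns the next linked parent
 * (child) zio, or NULL once the list has been exhausted.
 */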
630zio_t *
631zio_walk_parents(zio_t *cio, zio_link_t **zl)
632{
633 list_t *pl = &cio->io_parent_list;
634
635 *zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
636 if (*zl == NULL)
637 return (NULL);
638
639 ASSERT((*zl)->zl_child == cio);
640 return ((*zl)->zl_parent);
641}
642
643zio_t *
644zio_walk_children(zio_t *pio, zio_link_t **zl)
645{
646 list_t *cl = &pio->io_child_list;
647
648 ASSERT(MUTEX_HELD(&pio->io_lock));
649
650 *zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
651 if (*zl == NULL)
652 return (NULL);
653
654 ASSERT((*zl)->zl_parent == pio);
655 return ((*zl)->zl_child);
656}
657
658zio_t *
659zio_unique_parent(zio_t *cio)
660{
661 zio_link_t *zl = NULL;
662 zio_t *pio = zio_walk_parents(cio, &zl);
663
664 VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
665 return (pio);
666}
667
668void
669zio_add_child(zio_t *pio, zio_t *cio)
670{
671 /*
672 * Logical I/Os can have logical, gang, or vdev children.
673 * Gang I/Os can have gang or vdev children.
674 * Vdev I/Os can only have vdev children.
675 * The following ASSERT captures all of these constraints.
676 */
677 ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
678
679 /* Parent should not have READY stage if child doesn't have it. */
680 IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
681 (cio->io_child_type != ZIO_CHILD_VDEV),
682 (pio->io_pipeline & ZIO_STAGE_READY) == 0);
683
684 zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
685 zl->zl_parent = pio;
686 zl->zl_child = cio;
687
688 mutex_enter(&pio->io_lock);
689 mutex_enter(&cio->io_lock);
690
691 ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
692
693 uint64_t *countp = pio->io_children[cio->io_child_type];
694 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
695 countp[w] += !cio->io_state[w];
696
697 list_insert_head(&pio->io_child_list, zl);
698 list_insert_head(&cio->io_parent_list, zl);
699
700 mutex_exit(&cio->io_lock);
701 mutex_exit(&pio->io_lock);
702}
703
704void
705zio_add_child_first(zio_t *pio, zio_t *cio)
706{
707 /*
708 * Logical I/Os can have logical, gang, or vdev children.
709 * Gang I/Os can have gang or vdev children.
710 * Vdev I/Os can only have vdev children.
711 * The following ASSERT captures all of these constraints.
712 */
713 ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
714
715 /* Parent should not have READY stage if child doesn't have it. */
716 IMPLY((cio->io_pipeline & ZIO_STAGE_READY) == 0 &&
717 (cio->io_child_type != ZIO_CHILD_VDEV),
718 (pio->io_pipeline & ZIO_STAGE_READY) == 0);
719
720 zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
721 zl->zl_parent = pio;
722 zl->zl_child = cio;
723
724 ASSERT(list_is_empty(&cio->io_parent_list));
725 list_insert_head(&cio->io_parent_list, zl);
726
727 mutex_enter(&pio->io_lock);
728
729 ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
730
731 uint64_t *countp = pio->io_children[cio->io_child_type];
732 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
733 countp[w] += !cio->io_state[w];
734
735 list_insert_head(&pio->io_child_list, zl);
736
737 mutex_exit(&pio->io_lock);
738}
739
740static void
741zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
742{
743 ASSERT(zl->zl_parent == pio);
744 ASSERT(zl->zl_child == cio);
745
746 mutex_enter(&pio->io_lock);
747 mutex_enter(&cio->io_lock);
748
749 list_remove(&pio->io_child_list, zl);
750 list_remove(&cio->io_parent_list, zl);
751
752 mutex_exit(&cio->io_lock);
753 mutex_exit(&pio->io_lock);
754 kmem_cache_free(zio_link_cache, zl);
755}
756
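/*
 * Check whether this zio still has outstanding children of the types named in
 * 'childbits' for the given wait point. If it does, back the pipeline up one
 * stage, remember which counter we are stalled on, and return B_TRUE so the
 * caller suspends execution until zio_notify_parent() restarts it.
 */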
757static boolean_t
758zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
759{
760 boolean_t waiting = B_FALSE;
761
762 mutex_enter(&zio->io_lock);
763 ASSERT(zio->io_stall == NULL);
764 for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
765 if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
766 continue;
767
768 uint64_t *countp = &zio->io_children[c][wait];
769 if (*countp != 0) {
770 zio->io_stage >>= 1;
771 ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
772 zio->io_stall = countp;
773 waiting = B_TRUE;
774 break;
775 }
776 }
777 mutex_exit(&zio->io_lock);
778 return (waiting);
779}
780
781__attribute__((always_inline))
782static inline void
783zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
784 zio_t **next_to_executep)
785{
786 uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
787 int *errorp = &pio->io_child_error[zio->io_child_type];
788
789 mutex_enter(&pio->io_lock);
790 if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
791 *errorp = zio_worst_error(*errorp, zio->io_error);
792 pio->io_reexecute |= zio->io_reexecute;
793 ASSERT3U(*countp, >, 0);
794
795 (*countp)--;
796
797 if (*countp == 0 && pio->io_stall == countp) {
798 zio_taskq_type_t type =
799 pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
800 ZIO_TASKQ_INTERRUPT;
801 pio->io_stall = NULL;
802 mutex_exit(&pio->io_lock);
803
804 /*
805 * If we can tell the caller to execute this parent next, do
806 * so. We do this if the parent's zio type matches the child's
807 * type, or if it's a zio_null() with no done callback, and so
808 * has no actual work to do. Otherwise dispatch the parent zio
809 * in its own taskq.
810 *
811 * Having the caller execute the parent when possible reduces
812 * locking on the zio taskq's, reduces context switch
813 * overhead, and has no recursion penalty. Note that one
814 * read from disk typically causes at least 3 zio's: a
815 * zio_null(), the logical zio_read(), and then a physical
816 * zio. When the physical ZIO completes, we are able to call
817 * zio_done() on all 3 of these zio's from one invocation of
818 * zio_execute() by returning the parent back to
819 * zio_execute(). Since the parent isn't executed until this
820 * thread returns back to zio_execute(), the caller should do
821 * so promptly.
822 *
823 * In other cases, dispatching the parent prevents
824 * overflowing the stack when we have deeply nested
825 * parent-child relationships, as we do with the "mega zio"
826 * of writes for spa_sync(), and the chain of ZIL blocks.
827 */
828 if (next_to_executep != NULL && *next_to_executep == NULL &&
829 (pio->io_type == zio->io_type ||
830 (pio->io_type == ZIO_TYPE_NULL && !pio->io_done))) {
831 *next_to_executep = pio;
832 } else {
833 zio_taskq_dispatch(pio, type, B_FALSE);
834 }
835 } else {
836 mutex_exit(&pio->io_lock);
837 }
838}
839
840static void
841zio_inherit_child_errors(zio_t *zio, enum zio_child c)
842{
843 if (zio->io_child_error[c] != 0 && zio->io_error == 0)
844 zio->io_error = zio->io_child_error[c];
845}
846
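/*
 * Total ordering of zios by bookmark (objset, object, level, blkid), with the
 * zio's address as the final tie-breaker, suitable for use as an AVL
 * comparator.
 */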
847int
848zio_bookmark_compare(const void *x1, const void *x2)
849{
850 const zio_t *z1 = x1;
851 const zio_t *z2 = x2;
852
853 if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
854 return (-1);
855 if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
856 return (1);
857
858 if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
859 return (-1);
860 if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
861 return (1);
862
863 if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
864 return (-1);
865 if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
866 return (1);
867
868 if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
869 return (-1);
870 if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
871 return (1);
872
873 if (z1 < z2)
874 return (-1);
875 if (z1 > z2)
876 return (1);
877
878 return (0);
879}
880
881/*
882 * ==========================================================================
883 * Create the various types of I/O (read, write, free, etc)
884 * ==========================================================================
885 */
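/*
 * Common constructor behind all of the zio_read()/zio_write()/zio_free()/...
 * factory functions below. 'lsize' is the logical size and 'psize' the
 * physical (allocated) size; 'stage' and 'pipeline' select where in the I/O
 * pipeline the zio starts and which stages it will execute.
 */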
886static zio_t *
887zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
888 abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
889 void *private, zio_type_t type, zio_priority_t priority,
890 zio_flag_t flags, vdev_t *vd, uint64_t offset,
891 const zbookmark_phys_t *zb, enum zio_stage stage,
892 enum zio_stage pipeline)
893{
894 zio_t *zio;
895
896 IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
897 ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
898 ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
899
900 ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
901 ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
902 ASSERT(vd || stage == ZIO_STAGE_OPEN);
903
904 IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
905
906 zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
907 memset(zio, 0, sizeof (zio_t));
908
909 mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
910 cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
911
912 list_create(&zio->io_parent_list, sizeof (zio_link_t),
913 offsetof(zio_link_t, zl_parent_node));
914 list_create(&zio->io_child_list, sizeof (zio_link_t),
915 offsetof(zio_link_t, zl_child_node));
916 metaslab_trace_init(&zio->io_alloc_list);
917
918 if (vd != NULL)
919 zio->io_child_type = ZIO_CHILD_VDEV;
920 else if (flags & ZIO_FLAG_GANG_CHILD)
921 zio->io_child_type = ZIO_CHILD_GANG;
922 else if (flags & ZIO_FLAG_DDT_CHILD)
923 zio->io_child_type = ZIO_CHILD_DDT;
924 else
925 zio->io_child_type = ZIO_CHILD_LOGICAL;
926
927 if (bp != NULL) {
928 if (type != ZIO_TYPE_WRITE ||
929 zio->io_child_type == ZIO_CHILD_DDT) {
930 zio->io_bp_copy = *bp;
931 zio->io_bp = &zio->io_bp_copy; /* so caller can free */
932 } else {
933 zio->io_bp = (blkptr_t *)bp;
934 }
935 zio->io_bp_orig = *bp;
936 if (zio->io_child_type == ZIO_CHILD_LOGICAL)
937 zio->io_logical = zio;
938 if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
939 pipeline |= ZIO_GANG_STAGES;
940 }
941
942 zio->io_spa = spa;
943 zio->io_txg = txg;
944 zio->io_done = done;
945 zio->io_private = private;
946 zio->io_type = type;
947 zio->io_priority = priority;
948 zio->io_vd = vd;
949 zio->io_offset = offset;
950 zio->io_orig_abd = zio->io_abd = data;
951 zio->io_orig_size = zio->io_size = psize;
952 zio->io_lsize = lsize;
953 zio->io_orig_flags = zio->io_flags = flags;
954 zio->io_orig_stage = zio->io_stage = stage;
955 zio->io_orig_pipeline = zio->io_pipeline = pipeline;
956 zio->io_pipeline_trace = ZIO_STAGE_OPEN;
957 zio->io_allocator = ZIO_ALLOCATOR_NONE;
958
959 zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY) ||
960 (pipeline & ZIO_STAGE_READY) == 0;
961 zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
962
963 if (zb != NULL)
964 zio->io_bookmark = *zb;
965
966 if (pio != NULL) {
967 zio->io_metaslab_class = pio->io_metaslab_class;
968 if (zio->io_logical == NULL)
969 zio->io_logical = pio->io_logical;
970 if (zio->io_child_type == ZIO_CHILD_GANG)
971 zio->io_gang_leader = pio->io_gang_leader;
972 zio_add_child_first(pio, zio);
973 }
974
975 taskq_init_ent(&zio->io_tqent);
976
977 return (zio);
978}
979
980void
981zio_destroy(zio_t *zio)
982{
983 metaslab_trace_fini(&zio->io_alloc_list);
984 list_destroy(&zio->io_parent_list);
985 list_destroy(&zio->io_child_list);
986 mutex_destroy(&zio->io_lock);
987 cv_destroy(&zio->io_cv);
988 kmem_cache_free(zio_cache, zio);
989}
990
991/*
992 * ZIO intended to be between others. Provides synchronization at READY
993 * and DONE pipeline stages and calls the respective callbacks.
994 */
995zio_t *
996zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
997 void *private, zio_flag_t flags)
998{
999 zio_t *zio;
1000
1001 zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
1002 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
1003 ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
1004
1005 return (zio);
1006}
1007
1008/*
1009 * ZIO intended to be a root of a tree. Unlike null ZIO does not have a
1010 * READY pipeline stage (is ready on creation), so it should not be used
1011 * as child of any ZIO that may need waiting for grandchildren READY stage
1012 * (any other ZIO type).
1013 */
1014zio_t *
1015zio_root(spa_t *spa, zio_done_func_t *done, void *private, zio_flag_t flags)
1016{
1017 zio_t *zio;
1018
1019 zio = zio_create(NULL, spa, 0, NULL, NULL, 0, 0, done, private,
1020 ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, NULL, 0, NULL,
1021 ZIO_STAGE_OPEN, ZIO_ROOT_PIPELINE);
1022
1023 return (zio);
1024}
1025
1026static int
1027zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
1028 enum blk_verify_flag blk_verify, const char *fmt, ...)
1029{
1030 va_list adx;
1031 char buf[256];
1032
1033 va_start(adx, fmt);
1034 (void) vsnprintf(buf, sizeof (buf), fmt, adx);
1035 va_end(adx);
1036
1037 zfs_dbgmsg("bad blkptr at %px: "
1038 "DVA[0]=%#llx/%#llx "
1039 "DVA[1]=%#llx/%#llx "
1040 "DVA[2]=%#llx/%#llx "
1041 "prop=%#llx "
1042 "pad=%#llx,%#llx "
1043 "phys_birth=%#llx "
1044 "birth=%#llx "
1045 "fill=%#llx "
1046 "cksum=%#llx/%#llx/%#llx/%#llx",
1047 bp,
1048 (long long)bp->blk_dva[0].dva_word[0],
1049 (long long)bp->blk_dva[0].dva_word[1],
1050 (long long)bp->blk_dva[1].dva_word[0],
1051 (long long)bp->blk_dva[1].dva_word[1],
1052 (long long)bp->blk_dva[2].dva_word[0],
1053 (long long)bp->blk_dva[2].dva_word[1],
1054 (long long)bp->blk_prop,
1055 (long long)bp->blk_pad[0],
1056 (long long)bp->blk_pad[1],
1057 (long long)BP_GET_PHYSICAL_BIRTH(bp),
1058 (long long)BP_GET_LOGICAL_BIRTH(bp),
1059 (long long)bp->blk_fill,
1060 (long long)bp->blk_cksum.zc_word[0],
1061 (long long)bp->blk_cksum.zc_word[1],
1062 (long long)bp->blk_cksum.zc_word[2],
1063 (long long)bp->blk_cksum.zc_word[3]);
1064 switch (blk_verify) {
1065 case BLK_VERIFY_HALT:
1066 zfs_panic_recover("%s: %s", spa_name(spa), buf);
1067 break;
1068 case BLK_VERIFY_LOG:
1069 zfs_dbgmsg("%s: %s", spa_name(spa), buf);
1070 break;
1071 case BLK_VERIFY_ONLY:
1072 break;
1073 }
1074
1075 return (1);
1076}
1077
1078/*
1079 * Verify the block pointer fields contain reasonable values. This means
1080 * it only contains known object types, checksum/compression identifiers,
1081 * block sizes within the maximum allowed limits, valid DVAs, etc.
1082 *
1083 * If everything checks out B_TRUE is returned. The zfs_blkptr_verify
1084 * argument controls the behavior when an invalid field is detected.
1085 *
1086 * Values for blk_verify_flag:
1087 * BLK_VERIFY_ONLY: evaluate the block
1088 * BLK_VERIFY_LOG: evaluate the block and log problems
1089 * BLK_VERIFY_HALT: call zfs_panic_recover on error
1090 *
1091 * Values for blk_config_flag:
1092 * BLK_CONFIG_HELD: caller holds SCL_VDEV for writer
1093 * BLK_CONFIG_NEEDED: caller holds no config lock, SCL_VDEV will be
1094 * obtained for reader
1095 * BLK_CONFIG_SKIP: skip checks which require SCL_VDEV, for better
1096 * performance
1097 */
1098boolean_t
1099zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
1100 enum blk_config_flag blk_config, enum blk_verify_flag blk_verify)
1101{
1102 int errors = 0;
1103
1104 if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
1105 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1106 "blkptr at %px has invalid TYPE %llu",
1107 bp, (longlong_t)BP_GET_TYPE(bp));
1108 }
1109 if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
1110 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1111 "blkptr at %px has invalid CHECKSUM %llu",
1112 bp, (longlong_t)BP_GET_CHECKSUM(bp));
1113 }
1114 if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
1115 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1116 "blkptr at %px has invalid COMPRESS %llu",
1117 bp, (longlong_t)BP_GET_COMPRESS(bp));
1118 }
1119 if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
1120 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1121 "blkptr at %px has invalid LSIZE %llu",
1122 bp, (longlong_t)BP_GET_LSIZE(bp));
1123 }
1124 if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
1125 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1126 "blkptr at %px has invalid PSIZE %llu",
1127 bp, (longlong_t)BP_GET_PSIZE(bp));
1128 }
1129
1130 if (BP_IS_EMBEDDED(bp)) {
1131 if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
1132 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1133 "blkptr at %px has invalid ETYPE %llu",
1134 bp, (longlong_t)BPE_GET_ETYPE(bp));
1135 }
1136 }
1137
1138 /*
1139 * Do not verify individual DVAs if the config is not trusted. This
1140 * will be done once the zio is executed in vdev_mirror_map_alloc.
1141 */
1142 if (!spa->spa_trust_config)
1143 return (errors == 0);
1144
1145 switch (blk_config) {
1146 case BLK_CONFIG_HELD:
1147 ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
1148 break;
1149 case BLK_CONFIG_NEEDED:
1150 spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
1151 break;
1152 case BLK_CONFIG_SKIP:
1153 return (errors == 0);
1154 default:
1155 panic("invalid blk_config %u", blk_config);
1156 }
1157
1158 /*
1159 * Pool-specific checks.
1160 *
1161 * Note: it would be nice to verify that the logical birth
1162 * and physical birth are not too large. However,
1163 * spa_freeze() allows the birth time of log blocks (and
1164 * dmu_sync()-ed blocks that are in the log) to be arbitrarily
1165 * large.
1166 */
1167 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
1168 const dva_t *dva = &bp->blk_dva[i];
1169 uint64_t vdevid = DVA_GET_VDEV(dva);
1170
1171 if (vdevid >= spa->spa_root_vdev->vdev_children) {
1172 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1173 "blkptr at %px DVA %u has invalid VDEV %llu",
1174 bp, i, (longlong_t)vdevid);
1175 continue;
1176 }
1177 vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
1178 if (vd == NULL) {
1179 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1180 "blkptr at %px DVA %u has invalid VDEV %llu",
1181 bp, i, (longlong_t)vdevid);
1182 continue;
1183 }
1184 if (vd->vdev_ops == &vdev_hole_ops) {
1185 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1186 "blkptr at %px DVA %u has hole VDEV %llu",
1187 bp, i, (longlong_t)vdevid);
1188 continue;
1189 }
1190 if (vd->vdev_ops == &vdev_missing_ops) {
1191 /*
1192 * "missing" vdevs are valid during import, but we
1193 * don't have their detailed info (e.g. asize), so
1194 * we can't perform any more checks on them.
1195 */
1196 continue;
1197 }
1198 uint64_t offset = DVA_GET_OFFSET(dva);
1199 uint64_t asize = DVA_GET_ASIZE(dva);
1200 if (DVA_GET_GANG(dva))
1201 asize = vdev_gang_header_asize(vd);
1202 if (offset + asize > vd->vdev_asize) {
1203 errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
1204 "blkptr at %px DVA %u has invalid OFFSET %llu",
1205 bp, i, (longlong_t)offset);
1206 }
1207 }
1208 if (blk_config == BLK_CONFIG_NEEDED)
1209 spa_config_exit(spa, SCL_VDEV, bp);
1210
1211 return (errors == 0);
1212}
1213
1214boolean_t
1215zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
1216{
1217 (void) bp;
1218 uint64_t vdevid = DVA_GET_VDEV(dva);
1219
1220 if (vdevid >= spa->spa_root_vdev->vdev_children)
1221 return (B_FALSE);
1222
1223 vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
1224 if (vd == NULL)
1225 return (B_FALSE);
1226
1227 if (vd->vdev_ops == &vdev_hole_ops)
1228 return (B_FALSE);
1229
1230 if (vd->vdev_ops == &vdev_missing_ops) {
1231 return (B_FALSE);
1232 }
1233
1234 uint64_t offset = DVA_GET_OFFSET(dva);
1235 uint64_t asize = DVA_GET_ASIZE(dva);
1236
1237 if (DVA_GET_GANG(dva))
1238 asize = vdev_gang_header_asize(vd);
1239 if (offset + asize > vd->vdev_asize)
1240 return (B_FALSE);
1241
1242 return (B_TRUE);
1243}
1244
1245zio_t *
1246zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
1247 abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
1248 zio_priority_t priority, zio_flag_t flags, const zbookmark_phys_t *zb)
1249{
1250 zio_t *zio;
1251
1252 zio = zio_create(pio, spa, BP_GET_BIRTH(bp), bp,
1253 data, size, size, done, private,
1254 ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
1255 ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
1256 ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
1257
1258 return (zio);
1259}
1260
1261zio_t *
1262zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
1263 abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
1264 zio_done_func_t *ready, zio_done_func_t *children_ready,
1265 zio_done_func_t *done, void *private, zio_priority_t priority,
1266 zio_flag_t flags, const zbookmark_phys_t *zb)
1267{
1268 zio_t *zio;
1269
1270 ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
1271 zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
1272 zp->zp_compress >= ZIO_COMPRESS_OFF &&
1273 zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
1274 DMU_OT_IS_VALID(zp->zp_type) &&
1275 zp->zp_level < 32 &&
1276 zp->zp_copies > 0 &&
1277 zp->zp_copies <= spa_max_replication(spa));
1278
1279 zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
1280 ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
1281 ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
1282 ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
1283
1284 zio->io_ready = ready;
1285 zio->io_children_ready = children_ready;
1286 zio->io_prop = *zp;
1287
1288 /*
1289 * Data can be NULL if we are going to call zio_write_override() to
1290 * provide the already-allocated BP. But we may need the data to
1291 * verify a dedup hit (if requested). In this case, don't try to
1292 * dedup (just take the already-allocated BP verbatim). Encrypted
1293 * dedup blocks need data as well so we also disable dedup in this
1294 * case.
1295 */
1296 if (data == NULL &&
1297 (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
1298 zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
1299 }
1300
1301 return (zio);
1302}
1303
1304zio_t *
1305zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
1306 uint64_t size, zio_done_func_t *done, void *private,
1307 zio_priority_t priority, zio_flag_t flags, zbookmark_phys_t *zb)
1308{
1309 zio_t *zio;
1310
1311 zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
1312 ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
1313 ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
1314
1315 return (zio);
1316}
1317
1318void
1319zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite,
1320 boolean_t brtwrite)
1321{
1322 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
1323 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1324 ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
1325 ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
1326 ASSERT(!brtwrite || !nopwrite);
1327
1328 /*
1329 * We must reset the io_prop to match the values that existed
1330 * when the bp was first written by dmu_sync() keeping in mind
1331 * that nopwrite and dedup are mutually exclusive.
1332 */
1333 zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
1334 zio->io_prop.zp_nopwrite = nopwrite;
1335 zio->io_prop.zp_brtwrite = brtwrite;
1336 zio->io_prop.zp_copies = copies;
1337 zio->io_bp_override = bp;
1338}
1339
1340void
1341zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
1342{
1343
1344 (void) zfs_blkptr_verify(spa, bp, BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
1345
1346 /*
1347 * The check for EMBEDDED is a performance optimization. We
1348 * process the free here (by ignoring it) rather than
1349 * putting it on the list and then processing it in zio_free_sync().
1350 */
1351 if (BP_IS_EMBEDDED(bp))
1352 return;
1353
1354 /*
1355 * Frees that are for the currently-syncing txg, are not going to be
1356 * deferred, and which will not need to do a read (i.e. not GANG or
1357 * DEDUP), can be processed immediately. Otherwise, put them on the
1358 * in-memory list for later processing.
93e28d66
SD
1359 *
1360 * Note that we only defer frees after zfs_sync_pass_deferred_free
1361 * when the log space map feature is disabled. [see relevant comment
1362 * in spa_sync_iterate_to_convergence()]
2883cad5 1363 */
93e28d66
SD
1364 if (BP_IS_GANG(bp) ||
1365 BP_GET_DEDUP(bp) ||
2883cad5 1366 txg != spa->spa_syncing_txg ||
93e28d66 1367 (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
67a1b037
PJD
1368 !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) ||
1369 brt_maybe_exists(spa, bp)) {
6694ca55 1370 metaslab_check_free(spa, bp);
2883cad5
MA
1371 bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
1372 } else {
9cdf7b1f 1373 VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
2883cad5 1374 }
428870ff
BB
1375}
1376
9cdf7b1f
MA
1377/*
1378 * To improve performance, this function may return NULL if we were able
1379 * to do the free immediately. This avoids the cost of creating a zio
1380 * (and linking it to the parent, etc).
1381 */
34dc7c2f 1382zio_t *
428870ff 1383zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
4938d01d 1384 zio_flag_t flags)
34dc7c2f 1385{
428870ff
BB
1386 ASSERT(!BP_IS_HOLE(bp));
1387 ASSERT(spa_syncing_txg(spa) == txg);
34dc7c2f 1388
9b67f605 1389 if (BP_IS_EMBEDDED(bp))
9cdf7b1f 1390 return (NULL);
9b67f605 1391
13fe0198 1392 metaslab_check_free(spa, bp);
8c841793 1393 arc_freed(spa, bp);
d4a72f23 1394 dsl_scan_freed(spa, bp);
13fe0198 1395
67a1b037
PJD
1396 if (BP_IS_GANG(bp) ||
1397 BP_GET_DEDUP(bp) ||
1398 brt_maybe_exists(spa, bp)) {
9cdf7b1f 1399 /*
67a1b037
PJD
1400 * GANG, DEDUP and BRT blocks can induce a read (for the gang
1401 * block header, the DDT or the BRT), so issue them
1402 * asynchronously so that this thread is not tied up.
9cdf7b1f
MA
1403 */
1404 enum zio_stage stage =
1405 ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
2883cad5 1406
9cdf7b1f
MA
1407 return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
1408 BP_GET_PSIZE(bp), NULL, NULL,
1409 ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
1410 flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
1411 } else {
1412 metaslab_free(spa, bp, txg, B_FALSE);
1413 return (NULL);
1414 }
34dc7c2f
BB
1415}
1416
1417zio_t *
428870ff 1418zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
4938d01d 1419 zio_done_func_t *done, void *private, zio_flag_t flags)
34dc7c2f
BB
1420{
1421 zio_t *zio;
1422
3095ca91
MA
1423 (void) zfs_blkptr_verify(spa, bp, (flags & ZIO_FLAG_CONFIG_WRITER) ?
1424 BLK_CONFIG_HELD : BLK_CONFIG_NEEDED, BLK_VERIFY_HALT);
9b67f605
MA
1425
1426 if (BP_IS_EMBEDDED(bp))
1427 return (zio_null(pio, spa, NULL, NULL, NULL, 0));
1428
34dc7c2f
BB
1429 /*
1430 * A claim is an allocation of a specific block. Claims are needed
1431 * to support immediate writes in the intent log. The issue is that
1432 * immediate writes contain committed data, but in a txg that was
1433 * *not* committed. Upon opening the pool after an unclean shutdown,
1434 * the intent log claims all blocks that contain immediate write data
1435 * so that the SPA knows they're in use.
1436 *
1437 * All claims *must* be resolved in the first txg -- before the SPA
1438 * starts allocating blocks -- so that nothing is allocated twice.
428870ff 1439 * If txg == 0 we just verify that the block is claimable.
34dc7c2f 1440 */
493fcce9 1441 ASSERT3U(BP_GET_LOGICAL_BIRTH(&spa->spa_uberblock.ub_rootbp), <,
d2734cce
SD
1442 spa_min_claim_txg(spa));
1443 ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
76d04993 1444 ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(8) */
34dc7c2f 1445
b128c09f 1446 zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
2aa34383
DK
1447 BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
1448 flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
3dfb57a3 1449 ASSERT0(zio->io_queued_timestamp);
34dc7c2f
BB
1450
1451 return (zio);
1452}
1453
1b939560
BB
1454zio_t *
1455zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1456 zio_done_func_t *done, void *private, zio_priority_t priority,
4938d01d 1457 zio_flag_t flags, enum trim_flag trim_flags)
1b939560
BB
1458{
1459 zio_t *zio;
1460
1461 ASSERT0(vd->vdev_children);
1462 ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
1463 ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
1464 ASSERT3U(size, !=, 0);
1465
1466 zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
1467 private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
1468 vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
1469 zio->io_trim_flags = trim_flags;
1470
1471 return (zio);
1472}
1473
1474zio_t *
1475zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1476 abd_t *data, int checksum, zio_done_func_t *done, void *private,
1477 zio_priority_t priority, zio_flag_t flags, boolean_t labels)
1478{
1479 zio_t *zio;
1480
1481 ASSERT(vd->vdev_children == 0);
1482 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1483 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1484 ASSERT3U(offset + size, <=, vd->vdev_psize);
1485
1486 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1487 private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1488 offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
1489
1490 zio->io_prop.zp_checksum = checksum;
1491
1492 return (zio);
1493}
1494
1495zio_t *
1496zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
1497 abd_t *data, int checksum, zio_done_func_t *done, void *private,
1498 zio_priority_t priority, zio_flag_t flags, boolean_t labels)
1499{
1500 zio_t *zio;
1501
1502 ASSERT(vd->vdev_children == 0);
1503 ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
1504 offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
1505 ASSERT3U(offset + size, <=, vd->vdev_psize);
1506
1507 zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
1508 private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
1509 offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
1510
1511 zio->io_prop.zp_checksum = checksum;
1512
1513 if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
1514 /*
1515 * zec checksums are necessarily destructive -- they modify
1516 * the end of the write buffer to hold the verifier/checksum.
1517 * Therefore, we must make a local copy in case the data is
1518 * being written to multiple places in parallel.
1519 */
1520 abd_t *wbuf = abd_alloc_sametype(data, size);
1521 abd_copy(wbuf, data, size);
1522
1523 zio_push_transform(zio, wbuf, size, size, NULL);
1524 }
1525
1526 return (zio);
1527}
1528
1529/*
1530 * Create a child I/O to do some work for us.
1531 */
1532zio_t *
1533zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
1534 abd_t *data, uint64_t size, int type, zio_priority_t priority,
1535 zio_flag_t flags, zio_done_func_t *done, void *private)
1536{
1537 enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
1538 zio_t *zio;
1539
1540 /*
1541 * vdev child I/Os do not propagate their error to the parent.
1542 * Therefore, for correct operation the caller *must* check for
1543 * and handle the error in the child i/o's done callback.
1544 * The only exceptions are i/os that we don't care about
1545 * (OPTIONAL or REPAIR).
1546 */
1547 ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
1548 done != NULL);
1549
1550 if (type == ZIO_TYPE_READ && bp != NULL) {
1551 /*
1552 * If we have the bp, then the child should perform the
1553 * checksum and the parent need not. This pushes error
1554 * detection as close to the leaves as possible and
1555 * eliminates redundant checksums in the interior nodes.
1556 */
428870ff
BB
1557 pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
1558 pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
34dc7c2f
BB
1559 }
1560
a1d477c2
MA
1561 if (vd->vdev_ops->vdev_op_leaf) {
1562 ASSERT0(vd->vdev_children);
b128c09f 1563 offset += VDEV_LABEL_START_SIZE;
a1d477c2 1564 }
b128c09f 1565
a1d477c2 1566 flags |= ZIO_VDEV_CHILD_FLAGS(pio);
428870ff
BB
1567
1568 /*
1569 * If we've decided to do a repair, the write is not speculative --
1570 * even if the original read was.
1571 */
1572 if (flags & ZIO_FLAG_IO_REPAIR)
1573 flags &= ~ZIO_FLAG_SPECULATIVE;
1574
3dfb57a3
DB
1575 /*
1576 * If we're creating a child I/O that is not associated with a
1577 * top-level vdev, then the child zio is not an allocating I/O.
1578 * If this is a retried I/O then we ignore it since we will
1579 * have already processed the original allocating I/O.
1580 */
1581 if (flags & ZIO_FLAG_IO_ALLOCATING &&
1582 (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
cc99f275
DB
1583 ASSERT(pio->io_metaslab_class != NULL);
1584 ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
3dfb57a3
DB
1585 ASSERT(type == ZIO_TYPE_WRITE);
1586 ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
1587 ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
1588 ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
1589 pio->io_child_type == ZIO_CHILD_GANG);
1590
1591 flags &= ~ZIO_FLAG_IO_ALLOCATING;
1592 }
1593
2aa34383 1594 zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
428870ff
BB
1595 done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
1596 ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
3dfb57a3 1597 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
34dc7c2f 1598
b128c09f 1599 return (zio);
34dc7c2f
BB
1600}
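
As the comment at the top of zio_vdev_child_io() says, a child's error does not propagate to its parent, so callers are expected to inspect io_error in their done callback. A hedged sketch of such a callback follows; the function name and the handling are illustrative, not taken from zio.c.

/* Illustrative caller-side done callback (hypothetical name). */
static void
example_vdev_child_done(zio_t *zio)
{
        if (zio->io_error != 0) {
                /*
                 * The parent zio will not see this error on its own;
                 * the caller must handle it here, e.g. by retrying the
                 * child or recording the failure for the parent's own
                 * done callback to act on.
                 */
        }
}
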
1601
b128c09f 1602zio_t *
a6255b7f 1603zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
4938d01d 1604 zio_type_t type, zio_priority_t priority, zio_flag_t flags,
e9aa730c 1605 zio_done_func_t *done, void *private)
34dc7c2f 1606{
b128c09f 1607 zio_t *zio;
34dc7c2f 1608
b128c09f 1609 ASSERT(vd->vdev_ops->vdev_op_leaf);
34dc7c2f 1610
b128c09f 1611 zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
2aa34383 1612 data, size, size, done, private, type, priority,
e8b96c60 1613 flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
b128c09f 1614 vd, offset, NULL,
428870ff 1615 ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
34dc7c2f 1616
b128c09f 1617 return (zio);
34dc7c2f
BB
1618}
1619
cac416f1
RN
1620
1621/*
1622 * Send a flush command to the given vdev. Unlike most zio creation functions,
1623 * the flush zios are issued immediately. You can wait on pio to pause until
1624 * the flushes complete.
1625 */
34dc7c2f 1626void
5a3bffab 1627zio_flush(zio_t *pio, vdev_t *vd)
34dc7c2f 1628{
cac416f1
RN
1629 const zio_flag_t flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
1630 ZIO_FLAG_DONT_RETRY;
1631
5a3bffab
AM
1632 if (vd->vdev_nowritecache)
1633 return;
cac416f1 1634
5a3bffab 1635 if (vd->vdev_children == 0) {
c9c838aa 1636 zio_nowait(zio_create(pio, vd->vdev_spa, 0, NULL, NULL, 0, 0,
d7605ae7
RN
1637 NULL, NULL, ZIO_TYPE_FLUSH, ZIO_PRIORITY_NOW, flags, vd, 0,
1638 NULL, ZIO_STAGE_OPEN, ZIO_FLUSH_PIPELINE));
5a3bffab
AM
1639 } else {
1640 for (uint64_t c = 0; c < vd->vdev_children; c++)
1641 zio_flush(pio, vd->vdev_child[c]);
1642 }
34dc7c2f
BB
1643}
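
Since zio_flush() issues its flush zios immediately and attaches them to pio, a caller that wants to block until every disk cache has been flushed typically hangs the flushes off a root zio and waits on it. A minimal sketch, assuming spa and rvd are a valid pool and its root vdev; the flag choice is only illustrative.

/* Sketch of a typical zio_flush() caller. */
static int
example_flush_pool(spa_t *spa, vdev_t *rvd)
{
        zio_t *root = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

        zio_flush(root, rvd);           /* recurses over rvd's children */
        return (zio_wait(root));        /* blocks until all flushes finish */
}
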
1644
428870ff
BB
1645void
1646zio_shrink(zio_t *zio, uint64_t size)
1647{
1ce23dca
PS
1648 ASSERT3P(zio->io_executor, ==, NULL);
1649 ASSERT3U(zio->io_orig_size, ==, zio->io_size);
1650 ASSERT3U(size, <=, zio->io_size);
428870ff
BB
1651
1652 /*
1653 * We don't shrink for raidz because of problems with the
1654 * reconstruction when reading back less than the block size.
1655 * Note, BP_IS_RAIDZ() assumes no compression.
1656 */
1657 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
2aa34383
DK
1658 if (!BP_IS_RAIDZ(zio->io_bp)) {
1659 /* we are not doing a raw write */
1660 ASSERT3U(zio->io_size, ==, zio->io_lsize);
1661 zio->io_orig_size = zio->io_size = zio->io_lsize = size;
1662 }
428870ff
BB
1663}
1664
d9bb583c
AH
1665/*
1666 * Round the provided allocation size up to a value that can be allocated
1667 * by at least some vdev(s) in the pool with minimum or no additional
1668 * padding and without extra space usage on others.
1669 */
1670static uint64_t
1671zio_roundup_alloc_size(spa_t *spa, uint64_t size)
1672{
1673 if (size > spa->spa_min_alloc)
1674 return (roundup(size, spa->spa_gcd_alloc));
1675 return (spa->spa_min_alloc);
1676}
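
A worked example of the rounding rule above. spa_min_alloc and spa_gcd_alloc come from the pool's vdevs at run time; the 4 KiB values below are assumed purely for illustration.

/* Standalone model of zio_roundup_alloc_size() with assumed pool values. */
#include <assert.h>
#include <stdint.h>

#define TOY_ROUNDUP(x, y)       ((((x) + ((y) - 1)) / (y)) * (y))

static uint64_t
toy_roundup_alloc_size(uint64_t size, uint64_t min_alloc, uint64_t gcd_alloc)
{
        if (size > min_alloc)
                return (TOY_ROUNDUP(size, gcd_alloc));
        return (min_alloc);
}

static void
toy_roundup_example(void)
{
        /* min_alloc = gcd_alloc = 4096 (assumed) */
        assert(toy_roundup_alloc_size(6000, 4096, 4096) == 8192);
        assert(toy_roundup_alloc_size(512, 4096, 4096) == 4096);
}
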
1677
34dc7c2f
BB
1678/*
1679 * ==========================================================================
b128c09f 1680 * Prepare to read and write logical blocks
34dc7c2f
BB
1681 * ==========================================================================
1682 */
b128c09f 1683
62840030 1684static zio_t *
b128c09f 1685zio_read_bp_init(zio_t *zio)
34dc7c2f 1686{
b128c09f 1687 blkptr_t *bp = zio->io_bp;
b5256303
TC
1688 uint64_t psize =
1689 BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
34dc7c2f 1690
a1d477c2
MA
1691 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
1692
fb5f0bc8 1693 if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
9babb374 1694 zio->io_child_type == ZIO_CHILD_LOGICAL &&
b5256303 1695 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
a6255b7f
DQ
1696 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1697 psize, psize, zio_decompress);
34dc7c2f 1698 }
34dc7c2f 1699
b5256303
TC
1700 if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
1701 BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
1702 zio->io_child_type == ZIO_CHILD_LOGICAL) {
1703 zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
1704 psize, psize, zio_decrypt);
1705 }
1706
9b67f605 1707 if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
a6255b7f
DQ
1708 int psize = BPE_GET_PSIZE(bp);
1709 void *data = abd_borrow_buf(zio->io_abd, psize);
1710
9b67f605 1711 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
a6255b7f
DQ
1712 decode_embedded_bp_compressed(bp, data);
1713 abd_return_buf_copy(zio->io_abd, data, psize);
9b67f605
MA
1714 } else {
1715 ASSERT(!BP_IS_EMBEDDED(bp));
1716 }
1717
428870ff
BB
1718 if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
1719 zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
1720
62840030 1721 return (zio);
34dc7c2f
BB
1722}
1723
62840030 1724static zio_t *
b128c09f 1725zio_write_bp_init(zio_t *zio)
34dc7c2f 1726{
b128c09f 1727 if (!IO_IS_ALLOCATING(zio))
62840030 1728 return (zio);
34dc7c2f 1729
428870ff
BB
1730 ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1731
1732 if (zio->io_bp_override) {
3dfb57a3
DB
1733 blkptr_t *bp = zio->io_bp;
1734 zio_prop_t *zp = &zio->io_prop;
1735
493fcce9 1736 ASSERT(BP_GET_LOGICAL_BIRTH(bp) != zio->io_txg);
428870ff
BB
1737
1738 *bp = *zio->io_bp_override;
1739 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1740
67a1b037
PJD
1741 if (zp->zp_brtwrite)
1742 return (zio);
1743
1744 ASSERT(!BP_GET_DEDUP(zio->io_bp_override));
1745
9b67f605 1746 if (BP_IS_EMBEDDED(bp))
62840030 1747 return (zio);
9b67f605 1748
03c6040b
GW
1749 /*
1750 * If we've been overridden and nopwrite is set then
1751 * set the flag accordingly to indicate that a nopwrite
1752 * has already occurred.
1753 */
1754 if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
1755 ASSERT(!zp->zp_dedup);
3dfb57a3 1756 ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
03c6040b 1757 zio->io_flags |= ZIO_FLAG_NOPWRITE;
62840030 1758 return (zio);
03c6040b
GW
1759 }
1760
1761 ASSERT(!zp->zp_nopwrite);
1762
428870ff 1763 if (BP_IS_HOLE(bp) || !zp->zp_dedup)
62840030 1764 return (zio);
428870ff 1765
3c67d83a
TH
1766 ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
1767 ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
428870ff 1768
b5256303
TC
1769 if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
1770 !zp->zp_encrypt) {
428870ff
BB
1771 BP_SET_DEDUP(bp, 1);
1772 zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
62840030 1773 return (zio);
428870ff 1774 }
3dfb57a3
DB
1775
1776 /*
1777 * We were unable to handle this as an override bp, treat
1778 * it as a regular write I/O.
1779 */
5511754b 1780 zio->io_bp_override = NULL;
3dfb57a3
DB
1781 *bp = zio->io_bp_orig;
1782 zio->io_pipeline = zio->io_orig_pipeline;
1783 }
1784
62840030 1785 return (zio);
3dfb57a3
DB
1786}
1787
62840030 1788static zio_t *
3dfb57a3
DB
1789zio_write_compress(zio_t *zio)
1790{
1791 spa_t *spa = zio->io_spa;
1792 zio_prop_t *zp = &zio->io_prop;
1793 enum zio_compress compress = zp->zp_compress;
1794 blkptr_t *bp = zio->io_bp;
1795 uint64_t lsize = zio->io_lsize;
1796 uint64_t psize = zio->io_size;
fdc2d303 1797 uint32_t pass = 1;
3dfb57a3 1798
3dfb57a3
DB
1799 /*
1800 * If our children haven't all reached the ready stage,
1801 * wait for them and then repeat this pipeline stage.
1802 */
ddc751d5
GW
1803 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
1804 ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
62840030 1805 return (NULL);
ddc751d5 1806 }
3dfb57a3
DB
1807
1808 if (!IO_IS_ALLOCATING(zio))
62840030 1809 return (zio);
3dfb57a3
DB
1810
1811 if (zio->io_children_ready != NULL) {
1812 /*
1813 * Now that all our children are ready, run the callback
1814 * associated with this zio in case it wants to modify the
1815 * data to be written.
1816 */
1817 ASSERT3U(zp->zp_level, >, 0);
1818 zio->io_children_ready(zio);
428870ff 1819 }
34dc7c2f 1820
3dfb57a3
DB
1821 ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
1822 ASSERT(zio->io_bp_override == NULL);
1823
493fcce9 1824 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg) {
b128c09f
BB
1825 /*
1826 * We're rewriting an existing block, which means we're
1827 * working on behalf of spa_sync(). For spa_sync() to
1828 * converge, it must eventually be the case that we don't
1829 * have to allocate new blocks. But compression changes
1830 * the blocksize, which forces a reallocate, and makes
1831 * convergence take longer. Therefore, after the first
1832 * few passes, stop compressing to ensure convergence.
1833 */
428870ff
BB
1834 pass = spa_sync_pass(spa);
1835
1836 ASSERT(zio->io_txg == spa_syncing_txg(spa));
1837 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1838 ASSERT(!BP_GET_DEDUP(bp));
34dc7c2f 1839
55d85d5a 1840 if (pass >= zfs_sync_pass_dont_compress)
b128c09f 1841 compress = ZIO_COMPRESS_OFF;
34dc7c2f 1842
b128c09f 1843 /* Make sure someone doesn't change their mind on overwrites */
ed39d668
SD
1844 ASSERT(BP_IS_EMBEDDED(bp) || BP_IS_GANG(bp) ||
1845 MIN(zp->zp_copies, spa_max_replication(spa))
1846 == BP_GET_NDVAS(bp));
b128c09f 1847 }
34dc7c2f 1848
2aa34383 1849 /* If it's a compressed write that is not raw, compress the buffer. */
b5256303
TC
1850 if (compress != ZIO_COMPRESS_OFF &&
1851 !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
bff26b02
RY
1852 void *cbuf = NULL;
1853 psize = zio_compress_data(compress, zio->io_abd, &cbuf, lsize,
10b3c7f5 1854 zp->zp_complevel);
bff26b02 1855 if (psize == 0) {
b128c09f 1856 compress = ZIO_COMPRESS_OFF;
bff26b02
RY
1857 } else if (psize >= lsize) {
1858 compress = ZIO_COMPRESS_OFF;
1859 if (cbuf != NULL)
1860 zio_buf_free(cbuf, lsize);
b5256303
TC
1861 } else if (!zp->zp_dedup && !zp->zp_encrypt &&
1862 psize <= BPE_PAYLOAD_SIZE &&
9b67f605
MA
1863 zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
1864 spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
1865 encode_embedded_bp_compressed(bp,
1866 cbuf, compress, lsize, psize);
1867 BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
1868 BP_SET_TYPE(bp, zio->io_prop.zp_type);
1869 BP_SET_LEVEL(bp, zio->io_prop.zp_level);
1870 zio_buf_free(cbuf, lsize);
493fcce9 1871 BP_SET_LOGICAL_BIRTH(bp, zio->io_txg);
9b67f605
MA
1872 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1873 ASSERT(spa_feature_is_active(spa,
1874 SPA_FEATURE_EMBEDDED_DATA));
62840030 1875 return (zio);
428870ff 1876 } else {
9b67f605 1877 /*
b2255edc
BB
1878 * Round compressed size up to the minimum allocation
1879 * size of the smallest-ashift device, and zero the
1880 * tail. This ensures that the compressed size of the
1881 * BP (and thus compressratio property) are correct,
c3520e7f
MA
1882 * in that we charge for the padding used to fill out
1883 * the last sector.
9b67f605 1884 */
d9bb583c
AH
1885 size_t rounded = (size_t)zio_roundup_alloc_size(spa,
1886 psize);
c3520e7f 1887 if (rounded >= lsize) {
9b67f605
MA
1888 compress = ZIO_COMPRESS_OFF;
1889 zio_buf_free(cbuf, lsize);
c3520e7f 1890 psize = lsize;
9b67f605 1891 } else {
a6255b7f
DQ
1892 abd_t *cdata = abd_get_from_buf(cbuf, lsize);
1893 abd_take_ownership_of_buf(cdata, B_TRUE);
1894 abd_zero_off(cdata, psize, rounded - psize);
c3520e7f 1895 psize = rounded;
a6255b7f 1896 zio_push_transform(zio, cdata,
9b67f605
MA
1897 psize, lsize, NULL);
1898 }
b128c09f 1899 }
3dfb57a3
DB
1900
1901 /*
1902 * We were unable to handle this as an override bp, treat
1903 * it as a regular write I/O.
1904 */
1905 zio->io_bp_override = NULL;
1906 *bp = zio->io_bp_orig;
1907 zio->io_pipeline = zio->io_orig_pipeline;
1908
b1d21733
TC
1909 } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
1910 zp->zp_type == DMU_OT_DNODE) {
1911 /*
1912 * The DMU actually relies on the zio layer's compression
1913 * to free metadnode blocks that have had all contained
1914 * dnodes freed. As a result, even when doing a raw
1915 * receive, we must check whether the block can be compressed
1916 * to a hole.
1917 */
1918 psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
10b3c7f5
MN
1919 zio->io_abd, NULL, lsize, zp->zp_complevel);
1920 if (psize == 0 || psize >= lsize)
b1d21733 1921 compress = ZIO_COMPRESS_OFF;
52a36bd4
GA
1922 } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
1923 !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
1924 /*
1925 * If we are raw receiving an encrypted dataset we should not
1926 * take this codepath because it will change the on-disk block
1927 * and decryption will fail.
1928 */
d9bb583c
AH
1929 size_t rounded = MIN((size_t)zio_roundup_alloc_size(spa, psize),
1930 lsize);
c634320e
PD
1931
1932 if (rounded != psize) {
1933 abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
1934 abd_zero_off(cdata, psize, rounded - psize);
1935 abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
1936 psize = rounded;
1937 zio_push_transform(zio, cdata,
1938 psize, rounded, NULL);
1939 }
2aa34383
DK
1940 } else {
1941 ASSERT3U(psize, !=, 0);
b128c09f 1942 }
34dc7c2f 1943
b128c09f
BB
1944 /*
1945 * The final pass of spa_sync() must be all rewrites, but the first
1946 * few passes offer a trade-off: allocating blocks defers convergence,
1947 * but newly allocated blocks are sequential, so they can be written
1948 * to disk faster. Therefore, we allow the first few passes of
1949 * spa_sync() to allocate new blocks, but force rewrites after that.
1950 * There should only be a handful of blocks after pass 1 in any case.
1951 */
493fcce9 1952 if (!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg &&
b0bc7a84 1953 BP_GET_PSIZE(bp) == psize &&
55d85d5a 1954 pass >= zfs_sync_pass_rewrite) {
cc99f275 1955 VERIFY3U(psize, !=, 0);
1c27024e 1956 enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
cc99f275 1957
b128c09f
BB
1958 zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
1959 zio->io_flags |= ZIO_FLAG_IO_REWRITE;
1960 } else {
1961 BP_ZERO(bp);
1962 zio->io_pipeline = ZIO_WRITE_PIPELINE;
1963 }
34dc7c2f 1964
428870ff 1965 if (psize == 0) {
493fcce9 1966 if (BP_GET_LOGICAL_BIRTH(&zio->io_bp_orig) != 0 &&
b0bc7a84
MG
1967 spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
1968 BP_SET_LSIZE(bp, lsize);
1969 BP_SET_TYPE(bp, zp->zp_type);
1970 BP_SET_LEVEL(bp, zp->zp_level);
1971 BP_SET_BIRTH(bp, zio->io_txg, 0);
1972 }
b128c09f
BB
1973 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
1974 } else {
1975 ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
1976 BP_SET_LSIZE(bp, lsize);
b0bc7a84
MG
1977 BP_SET_TYPE(bp, zp->zp_type);
1978 BP_SET_LEVEL(bp, zp->zp_level);
428870ff 1979 BP_SET_PSIZE(bp, psize);
b128c09f
BB
1980 BP_SET_COMPRESS(bp, compress);
1981 BP_SET_CHECKSUM(bp, zp->zp_checksum);
428870ff 1982 BP_SET_DEDUP(bp, zp->zp_dedup);
b128c09f 1983 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
428870ff
BB
1984 if (zp->zp_dedup) {
1985 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1986 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
b5256303
TC
1987 ASSERT(!zp->zp_encrypt ||
1988 DMU_OT_IS_ENCRYPTED(zp->zp_type));
428870ff
BB
1989 zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
1990 }
03c6040b
GW
1991 if (zp->zp_nopwrite) {
1992 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
1993 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
1994 zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
1995 }
428870ff 1996 }
62840030 1997 return (zio);
428870ff
BB
1998}
1999
62840030 2000static zio_t *
428870ff
BB
2001zio_free_bp_init(zio_t *zio)
2002{
2003 blkptr_t *bp = zio->io_bp;
2004
2005 if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
2006 if (BP_GET_DEDUP(bp))
2007 zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
b128c09f 2008 }
34dc7c2f 2009
a1d477c2
MA
2010 ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
2011
62840030 2012 return (zio);
34dc7c2f
BB
2013}
2014
b128c09f
BB
2015/*
2016 * ==========================================================================
2017 * Execute the I/O pipeline
2018 * ==========================================================================
2019 */
2020
2021static void
7ef5e54e 2022zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
34dc7c2f 2023{
428870ff 2024 spa_t *spa = zio->io_spa;
b128c09f 2025 zio_type_t t = zio->io_type;
a38718a6 2026 int flags = (cutinline ? TQ_FRONT : 0);
34dc7c2f
BB
2027
2028 /*
9babb374
BB
2029 * If we're a config writer or a probe, the normal issue and
2030 * interrupt threads may all be blocked waiting for the config lock.
2031 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
34dc7c2f 2032 */
9babb374 2033 if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
b128c09f 2034 t = ZIO_TYPE_NULL;
34dc7c2f
BB
2035
2036 /*
b128c09f 2037 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
34dc7c2f 2038 */
b128c09f
BB
2039 if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
2040 t = ZIO_TYPE_NULL;
34dc7c2f 2041
428870ff 2042 /*
7ef5e54e 2043 * If this is a high priority I/O, then use the high priority taskq if
04bae5ec 2044 * available or cut the line otherwise.
428870ff 2045 */
04bae5ec
AM
2046 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) {
2047 if (spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
2048 q++;
2049 else
2050 flags |= TQ_FRONT;
2051 }
428870ff
BB
2052
2053 ASSERT3U(q, <, ZIO_TASKQ_TYPES);
5cc556b4 2054
a38718a6
GA
2055 /*
2056 * NB: We are assuming that the zio can only be dispatched
2057 * to a single taskq at a time. It would be a grievous error
2058 * to dispatch the zio to another taskq at the same time.
2059 */
2060 ASSERT(taskq_empty_ent(&zio->io_tqent));
23c13c7e 2061 spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags,
3bd4df38 2062 &zio->io_tqent, zio);
b128c09f 2063}
34dc7c2f 2064
b128c09f 2065static boolean_t
7ef5e54e 2066zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
b128c09f 2067{
b128c09f 2068 spa_t *spa = zio->io_spa;
34dc7c2f 2069
b3212d2f
MA
2070 taskq_t *tq = taskq_of_curthread();
2071
1c27024e 2072 for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
7ef5e54e
AL
2073 spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
2074 uint_t i;
2075 for (i = 0; i < tqs->stqs_count; i++) {
b3212d2f 2076 if (tqs->stqs_taskq[i] == tq)
7ef5e54e
AL
2077 return (B_TRUE);
2078 }
2079 }
34dc7c2f 2080
b128c09f
BB
2081 return (B_FALSE);
2082}
34dc7c2f 2083
62840030 2084static zio_t *
b128c09f
BB
2085zio_issue_async(zio_t *zio)
2086{
3bd4df38 2087 ASSERT((zio->io_type != ZIO_TYPE_WRITE) || ZIO_HAS_ALLOCATOR(zio));
428870ff 2088 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
62840030 2089 return (NULL);
34dc7c2f
BB
2090}
2091
b128c09f 2092void
23c13c7e 2093zio_interrupt(void *zio)
34dc7c2f 2094{
428870ff 2095 zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
b128c09f 2096}
34dc7c2f 2097
d3c1e45b
MM
2098void
2099zio_delay_interrupt(zio_t *zio)
2100{
2101 /*
2102 * The timeout_generic() function isn't defined in userspace, so
2103 * rather than trying to implement the function, the zio delay
2104 * functionality has been disabled for userspace builds.
2105 */
2106
2107#ifdef _KERNEL
2108 /*
2109 * If io_target_timestamp is zero, then no delay has been registered
2110 * for this IO, thus jump to the end of this function and "skip" the
 2111 * delay, issuing it directly to the zio layer.
2112 */
2113 if (zio->io_target_timestamp != 0) {
2114 hrtime_t now = gethrtime();
2115
2116 if (now >= zio->io_target_timestamp) {
2117 /*
2118 * This IO has already taken longer than the target
2119 * delay to complete, so we don't want to delay it
2120 * any longer; we "miss" the delay and issue it
2121 * directly to the zio layer. This is likely due to
2122 * the target latency being set to a value less than
2123 * the underlying hardware can satisfy (e.g. delay
2124 * set to 1ms, but the disks take 10ms to complete an
2125 * IO request).
2126 */
2127
2128 DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
2129 hrtime_t, now);
2130
2131 zio_interrupt(zio);
2132 } else {
2133 taskqid_t tid;
2134 hrtime_t diff = zio->io_target_timestamp - now;
2135 clock_t expire_at_tick = ddi_get_lbolt() +
2136 NSEC_TO_TICK(diff);
2137
2138 DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
2139 hrtime_t, now, hrtime_t, diff);
2140
2141 if (NSEC_TO_TICK(diff) == 0) {
2142 /* Our delay is less than a jiffy - just spin */
2143 zfs_sleep_until(zio->io_target_timestamp);
2144 zio_interrupt(zio);
2145 } else {
2146 /*
2147 * Use taskq_dispatch_delay() in the place of
2148 * OpenZFS's timeout_generic().
2149 */
2150 tid = taskq_dispatch_delay(system_taskq,
23c13c7e
AL
2151 zio_interrupt, zio, TQ_NOSLEEP,
2152 expire_at_tick);
d3c1e45b
MM
2153 if (tid == TASKQID_INVALID) {
2154 /*
2155 * Couldn't allocate a task. Just
2156 * finish the zio without a delay.
2157 */
2158 zio_interrupt(zio);
2159 }
2160 }
2161 }
2162 return;
2163 }
2164#endif
2165 DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
2166 zio_interrupt(zio);
2167}
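
The three branches above reduce to a simple decision on the target timestamp. A standalone model follows, with hypothetical helper names standing in for zio_interrupt() and the delayed taskq dispatch.

/* Toy model of zio_delay_interrupt(); helper names are hypothetical. */
#include <stdint.h>

typedef uint64_t toy_hrtime_t;

static void toy_issue_now(void) { }                     /* ~ zio_interrupt() */
static void toy_schedule_in(toy_hrtime_t ns) { (void) ns; } /* ~ delayed dispatch */

static void
toy_delay_interrupt(toy_hrtime_t now, toy_hrtime_t target)
{
        if (target == 0)
                toy_issue_now();                /* no delay registered */
        else if (now >= target)
                toy_issue_now();                /* missed the delay; issue now */
        else
                toy_schedule_in(target - now);  /* wait out the remainder */
}
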
2168
8fb1ede1 2169static void
638dd5f4 2170zio_deadman_impl(zio_t *pio, int ziodepth)
8fb1ede1
BB
2171{
2172 zio_t *cio, *cio_next;
2173 zio_link_t *zl = NULL;
2174 vdev_t *vd = pio->io_vd;
2175
638dd5f4
TC
2176 if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
2177 vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
8fb1ede1
BB
2178 zbookmark_phys_t *zb = &pio->io_bookmark;
2179 uint64_t delta = gethrtime() - pio->io_timestamp;
2180 uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
2181
a887d653 2182 zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
8fb1ede1 2183 "delta=%llu queued=%llu io=%llu "
8e739b2c
RE
2184 "path=%s "
2185 "last=%llu type=%d "
4938d01d 2186 "priority=%d flags=0x%llx stage=0x%x "
8e739b2c
RE
2187 "pipeline=0x%x pipeline-trace=0x%x "
2188 "objset=%llu object=%llu "
2189 "level=%llu blkid=%llu "
2190 "offset=%llu size=%llu "
2191 "error=%d",
638dd5f4 2192 ziodepth, pio, pio->io_timestamp,
8e739b2c
RE
2193 (u_longlong_t)delta, pio->io_delta, pio->io_delay,
2194 vd ? vd->vdev_path : "NULL",
2195 vq ? vq->vq_io_complete_ts : 0, pio->io_type,
4938d01d
RY
2196 pio->io_priority, (u_longlong_t)pio->io_flags,
2197 pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
8e739b2c
RE
2198 (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
2199 (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
2200 (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
2201 pio->io_error);
1144586b 2202 (void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
4f072827 2203 pio->io_spa, vd, zb, pio, 0);
8fb1ede1
BB
2204
2205 if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
2206 taskq_empty_ent(&pio->io_tqent)) {
2207 zio_interrupt(pio);
2208 }
2209 }
2210
2211 mutex_enter(&pio->io_lock);
2212 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2213 cio_next = zio_walk_children(pio, &zl);
638dd5f4 2214 zio_deadman_impl(cio, ziodepth + 1);
8fb1ede1
BB
2215 }
2216 mutex_exit(&pio->io_lock);
2217}
2218
2219/*
2220 * Log the critical information describing this zio and all of its children
2221 * using the zfs_dbgmsg() interface then post deadman event for the ZED.
2222 */
2223void
dd66857d 2224zio_deadman(zio_t *pio, const char *tag)
8fb1ede1
BB
2225{
2226 spa_t *spa = pio->io_spa;
2227 char *name = spa_name(spa);
2228
2229 if (!zfs_deadman_enabled || spa_suspended(spa))
2230 return;
2231
638dd5f4 2232 zio_deadman_impl(pio, 0);
8fb1ede1
BB
2233
2234 switch (spa_get_deadman_failmode(spa)) {
2235 case ZIO_FAILURE_MODE_WAIT:
2236 zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
2237 break;
2238
2239 case ZIO_FAILURE_MODE_CONTINUE:
2240 zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
2241 break;
2242
2243 case ZIO_FAILURE_MODE_PANIC:
2244 fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
2245 break;
2246 }
2247}
2248
b128c09f
BB
2249/*
2250 * Execute the I/O pipeline until one of the following occurs:
2251 * (1) the I/O completes; (2) the pipeline stalls waiting for
2252 * dependent child I/Os; (3) the I/O issues, so we're waiting
2253 * for an I/O completion interrupt; (4) the I/O is delegated by
2254 * vdev-level caching or aggregation; (5) the I/O is deferred
2255 * due to vdev-level queueing; (6) the I/O is handed off to
2256 * another thread. In all cases, the pipeline stops whenever
8e07b99b 2257 * there's no CPU work; it never burns a thread in cv_wait_io().
b128c09f
BB
2258 *
2259 * There's no locking on io_stage because there's no legitimate way
2260 * for multiple threads to be attempting to process the same I/O.
2261 */
428870ff 2262static zio_pipe_stage_t *zio_pipeline[];
34dc7c2f 2263
da6b4005
NB
2264/*
2265 * zio_execute() is a wrapper around the static function
2266 * __zio_execute() so that we can force __zio_execute() to be
2267 * inlined. This reduces stack overhead which is important
2268 * because __zio_execute() is called recursively in several zio
2269 * code paths. zio_execute() itself cannot be inlined because
2270 * it is externally visible.
2271 */
b128c09f 2272void
23c13c7e 2273zio_execute(void *zio)
da6b4005 2274{
92119cc2
BB
2275 fstrans_cookie_t cookie;
2276
2277 cookie = spl_fstrans_mark();
da6b4005 2278 __zio_execute(zio);
92119cc2 2279 spl_fstrans_unmark(cookie);
da6b4005
NB
2280}
2281
b58986ee
BB
2282/*
2283 * Used to determine if in the current context the stack is sized large
2284 * enough to allow zio_execute() to be called recursively. A minimum
2285 * stack size of 16K is required to avoid needing to re-dispatch the zio.
2286 */
65c7cc49 2287static boolean_t
b58986ee
BB
2288zio_execute_stack_check(zio_t *zio)
2289{
2290#if !defined(HAVE_LARGE_STACKS)
2291 dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
2292
2293 /* Executing in txg_sync_thread() context. */
2294 if (dp && curthread == dp->dp_tx.tx_sync_thread)
2295 return (B_TRUE);
2296
2297 /* Pool initialization outside of zio_taskq context. */
2298 if (dp && spa_is_initializing(dp->dp_spa) &&
2299 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
2300 !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
2301 return (B_TRUE);
14e4e3cb
AZ
2302#else
2303 (void) zio;
b58986ee
BB
2304#endif /* HAVE_LARGE_STACKS */
2305
2306 return (B_FALSE);
2307}
2308
da6b4005
NB
2309__attribute__((always_inline))
2310static inline void
2311__zio_execute(zio_t *zio)
b128c09f 2312{
3dfb57a3
DB
2313 ASSERT3U(zio->io_queued_timestamp, >, 0);
2314
b128c09f 2315 while (zio->io_stage < ZIO_STAGE_DONE) {
428870ff
BB
2316 enum zio_stage pipeline = zio->io_pipeline;
2317 enum zio_stage stage = zio->io_stage;
62840030
MA
2318
2319 zio->io_executor = curthread;
34dc7c2f 2320
b128c09f 2321 ASSERT(!MUTEX_HELD(&zio->io_lock));
428870ff
BB
2322 ASSERT(ISP2(stage));
2323 ASSERT(zio->io_stall == NULL);
34dc7c2f 2324
428870ff
BB
2325 do {
2326 stage <<= 1;
2327 } while ((stage & pipeline) == 0);
b128c09f
BB
2328
2329 ASSERT(stage <= ZIO_STAGE_DONE);
34dc7c2f
BB
2330
2331 /*
b128c09f
BB
2332 * If we are in interrupt context and this pipeline stage
2333 * will grab a config lock that is held across I/O,
428870ff
BB
2334 * or may wait for an I/O that needs an interrupt thread
2335 * to complete, issue async to avoid deadlock.
2336 *
2337 * For VDEV_IO_START, we cut in line so that the io will
2338 * be sent to disk promptly.
34dc7c2f 2339 */
91579709
BB
2340 if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
2341 zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
b58986ee
BB
2342 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2343 zio_requeue_io_start_cut_in_line : B_FALSE;
91579709
BB
2344 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
2345 return;
2346 }
2347
2348 /*
b58986ee
BB
2349 * If the current context doesn't have large enough stacks
2350 * the zio must be issued asynchronously to prevent overflow.
91579709 2351 */
b58986ee
BB
2352 if (zio_execute_stack_check(zio)) {
2353 boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
2354 zio_requeue_io_start_cut_in_line : B_FALSE;
428870ff 2355 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
b128c09f 2356 return;
34dc7c2f
BB
2357 }
2358
b128c09f 2359 zio->io_stage = stage;
3dfb57a3 2360 zio->io_pipeline_trace |= zio->io_stage;
34dc7c2f 2361
62840030
MA
2362 /*
2363 * The zio pipeline stage returns the next zio to execute
2364 * (typically the same as this one), or NULL if we should
2365 * stop.
2366 */
2367 zio = zio_pipeline[highbit64(stage) - 1](zio);
34dc7c2f 2368
62840030
MA
2369 if (zio == NULL)
2370 return;
b128c09f 2371 }
34dc7c2f
BB
2372}
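
The stage-selection loop above advances io_stage by shifting a single-bit mask left until it lands on the next bit that is set in the pipeline mask. A self-contained model of that walk with toy stage values (the real masks live in zio_impl.h):

/* Toy model of the stage walk in __zio_execute(). */
#include <stdint.h>
#include <stdio.h>

#define TOY_STAGE_OPEN          (1U << 0)
#define TOY_STAGE_ISSUE_ASYNC   (1U << 3)
#define TOY_STAGE_DONE          (1U << 7)
#define TOY_PIPELINE            (TOY_STAGE_ISSUE_ASYNC | TOY_STAGE_DONE)

static void
toy_pipeline_walk(void)
{
        uint32_t stage = TOY_STAGE_OPEN;

        while (stage < TOY_STAGE_DONE) {
                /* Advance to the next stage bit present in the pipeline. */
                do {
                        stage <<= 1;
                } while ((stage & TOY_PIPELINE) == 0);

                printf("executing stage 0x%x\n", stage);
        }
}
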
2373
da6b4005 2374
b128c09f
BB
2375/*
2376 * ==========================================================================
2377 * Initiate I/O, either sync or async
2378 * ==========================================================================
2379 */
2380int
2381zio_wait(zio_t *zio)
34dc7c2f 2382{
9cdf7b1f
MA
2383 /*
2384 * Some routines, like zio_free_sync(), may return a NULL zio
2385 * to avoid the performance overhead of creating and then destroying
2386 * an unneeded zio. For the callers' simplicity, we accept a NULL
2387 * zio and ignore it.
2388 */
2389 if (zio == NULL)
2390 return (0);
2391
8fb1ede1 2392 long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
b128c09f 2393 int error;
34dc7c2f 2394
1ce23dca
PS
2395 ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
2396 ASSERT3P(zio->io_executor, ==, NULL);
34dc7c2f 2397
b128c09f 2398 zio->io_waiter = curthread;
3dfb57a3
DB
2399 ASSERT0(zio->io_queued_timestamp);
2400 zio->io_queued_timestamp = gethrtime();
34dc7c2f 2401
3bd4df38
EN
2402 if (zio->io_type == ZIO_TYPE_WRITE) {
2403 spa_select_allocator(zio);
2404 }
da6b4005 2405 __zio_execute(zio);
34dc7c2f 2406
b128c09f 2407 mutex_enter(&zio->io_lock);
8fb1ede1
BB
2408 while (zio->io_executor != NULL) {
2409 error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
2410 ddi_get_lbolt() + timeout);
2411
2412 if (zfs_deadman_enabled && error == -1 &&
2413 gethrtime() - zio->io_queued_timestamp >
2414 spa_deadman_ziotime(zio->io_spa)) {
2415 mutex_exit(&zio->io_lock);
2416 timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
2417 zio_deadman(zio, FTAG);
2418 mutex_enter(&zio->io_lock);
2419 }
2420 }
b128c09f 2421 mutex_exit(&zio->io_lock);
34dc7c2f 2422
b128c09f
BB
2423 error = zio->io_error;
2424 zio_destroy(zio);
34dc7c2f 2425
b128c09f
BB
2426 return (error);
2427}
34dc7c2f 2428
b128c09f
BB
2429void
2430zio_nowait(zio_t *zio)
2431{
9cdf7b1f
MA
2432 /*
2433 * See comment in zio_wait().
2434 */
2435 if (zio == NULL)
2436 return;
2437
1ce23dca 2438 ASSERT3P(zio->io_executor, ==, NULL);
34dc7c2f 2439
d164b209 2440 if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
b035f2b2 2441 list_is_empty(&zio->io_parent_list)) {
8878261f
BB
2442 zio_t *pio;
2443
34dc7c2f 2444 /*
b128c09f 2445 * This is a logical async I/O with no parent to wait for it.
9babb374
BB
2446 * We add it to the spa_async_root_zio "Godfather" I/O which
2447 * will ensure they complete prior to unloading the pool.
34dc7c2f 2448 */
b128c09f 2449 spa_t *spa = zio->io_spa;
09eb36ce 2450 pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
9babb374 2451
8878261f 2452 zio_add_child(pio, zio);
b128c09f 2453 }
34dc7c2f 2454
3dfb57a3
DB
2455 ASSERT0(zio->io_queued_timestamp);
2456 zio->io_queued_timestamp = gethrtime();
3bd4df38
EN
2457 if (zio->io_type == ZIO_TYPE_WRITE) {
2458 spa_select_allocator(zio);
2459 }
da6b4005 2460 __zio_execute(zio);
b128c09f 2461}
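
zio_wait() and zio_nowait() are the two ways to launch a constructed zio: synchronously, with the error handed back to the caller, or fire-and-forget, with errors handled in a done callback or absorbed by the godfather I/O. A hedged usage sketch; the priority and flags below are placeholders, not a recommendation.

/* Sketch of synchronous issue; parameters are placeholders. */
static int
example_sync_read(spa_t *spa, blkptr_t *bp, abd_t *abd, uint64_t size,
    const zbookmark_phys_t *zb)
{
        return (zio_wait(zio_read(NULL, spa, bp, abd, size, NULL, NULL,
            ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb)));
}
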
34dc7c2f 2462
b128c09f
BB
2463/*
2464 * ==========================================================================
1ce23dca 2465 * Reexecute, cancel, or suspend/resume failed I/O
b128c09f
BB
2466 * ==========================================================================
2467 */
34dc7c2f 2468
b128c09f 2469static void
23c13c7e 2470zio_reexecute(void *arg)
b128c09f 2471{
23c13c7e 2472 zio_t *pio = arg;
3afdc97d 2473 zio_t *cio, *cio_next, *gio;
d164b209
BB
2474
2475 ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
2476 ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
9babb374
BB
2477 ASSERT(pio->io_gang_leader == NULL);
2478 ASSERT(pio->io_gang_tree == NULL);
34dc7c2f 2479
3afdc97d 2480 mutex_enter(&pio->io_lock);
b128c09f
BB
2481 pio->io_flags = pio->io_orig_flags;
2482 pio->io_stage = pio->io_orig_stage;
2483 pio->io_pipeline = pio->io_orig_pipeline;
2484 pio->io_reexecute = 0;
03c6040b 2485 pio->io_flags |= ZIO_FLAG_REEXECUTED;
3dfb57a3 2486 pio->io_pipeline_trace = 0;
b128c09f 2487 pio->io_error = 0;
3afdc97d
AM
2488 pio->io_state[ZIO_WAIT_READY] = (pio->io_stage >= ZIO_STAGE_READY) ||
2489 (pio->io_pipeline & ZIO_STAGE_READY) == 0;
2490 pio->io_state[ZIO_WAIT_DONE] = (pio->io_stage >= ZIO_STAGE_DONE);
2491 zio_link_t *zl = NULL;
2492 while ((gio = zio_walk_parents(pio, &zl)) != NULL) {
2493 for (int w = 0; w < ZIO_WAIT_TYPES; w++) {
2494 gio->io_children[pio->io_child_type][w] +=
2495 !pio->io_state[w];
2496 }
2497 }
1c27024e 2498 for (int c = 0; c < ZIO_CHILD_TYPES; c++)
b128c09f 2499 pio->io_child_error[c] = 0;
34dc7c2f 2500
428870ff
BB
2501 if (IO_IS_ALLOCATING(pio))
2502 BP_ZERO(pio->io_bp);
34dc7c2f 2503
b128c09f
BB
2504 /*
2505 * As we reexecute pio's children, new children could be created.
d164b209 2506 * New children go to the head of pio's io_child_list, however,
b128c09f 2507 * so we will (correctly) not reexecute them. The key is that
d164b209
BB
2508 * the remainder of pio's io_child_list, from 'cio_next' onward,
2509 * cannot be affected by any side effects of reexecuting 'cio'.
b128c09f 2510 */
3afdc97d 2511 zl = NULL;
3dfb57a3
DB
2512 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
2513 cio_next = zio_walk_children(pio, &zl);
b128c09f 2514 mutex_exit(&pio->io_lock);
d164b209 2515 zio_reexecute(cio);
a8b2e306 2516 mutex_enter(&pio->io_lock);
34dc7c2f 2517 }
a8b2e306 2518 mutex_exit(&pio->io_lock);
34dc7c2f 2519
b128c09f
BB
2520 /*
2521 * Now that all children have been reexecuted, execute the parent.
9babb374 2522 * We don't reexecute "The Godfather" I/O here as it's the
9e2c3bb4 2523 * responsibility of the caller to wait on it.
b128c09f 2524 */
3dfb57a3
DB
2525 if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
2526 pio->io_queued_timestamp = gethrtime();
da6b4005 2527 __zio_execute(pio);
3dfb57a3 2528 }
34dc7c2f
BB
2529}
2530
b128c09f 2531void
cec3a0a1 2532zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
34dc7c2f 2533{
b128c09f
BB
2534 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
2535 fm_panic("Pool '%s' has encountered an uncorrectable I/O "
2536 "failure and the failure mode property for this pool "
2537 "is set to panic.", spa_name(spa));
34dc7c2f 2538
c3f2f1aa
DB
2539 if (reason != ZIO_SUSPEND_MMP) {
2540 cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable "
2541 "I/O failure and has been suspended.\n", spa_name(spa));
2542 }
bf89c199 2543
1144586b 2544 (void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
4f072827 2545 NULL, NULL, 0);
34dc7c2f 2546
b128c09f 2547 mutex_enter(&spa->spa_suspend_lock);
34dc7c2f 2548
b128c09f 2549 if (spa->spa_suspend_zio_root == NULL)
9babb374
BB
2550 spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
2551 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
2552 ZIO_FLAG_GODFATHER);
34dc7c2f 2553
cec3a0a1 2554 spa->spa_suspended = reason;
34dc7c2f 2555
b128c09f 2556 if (zio != NULL) {
9babb374 2557 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
b128c09f
BB
2558 ASSERT(zio != spa->spa_suspend_zio_root);
2559 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
d164b209 2560 ASSERT(zio_unique_parent(zio) == NULL);
b128c09f
BB
2561 ASSERT(zio->io_stage == ZIO_STAGE_DONE);
2562 zio_add_child(spa->spa_suspend_zio_root, zio);
2563 }
34dc7c2f 2564
b128c09f
BB
2565 mutex_exit(&spa->spa_suspend_lock);
2566}
34dc7c2f 2567
9babb374 2568int
b128c09f
BB
2569zio_resume(spa_t *spa)
2570{
9babb374 2571 zio_t *pio;
34dc7c2f
BB
2572
2573 /*
b128c09f 2574 * Reexecute all previously suspended i/o.
34dc7c2f 2575 */
b128c09f 2576 mutex_enter(&spa->spa_suspend_lock);
cec3a0a1 2577 spa->spa_suspended = ZIO_SUSPEND_NONE;
b128c09f
BB
2578 cv_broadcast(&spa->spa_suspend_cv);
2579 pio = spa->spa_suspend_zio_root;
2580 spa->spa_suspend_zio_root = NULL;
2581 mutex_exit(&spa->spa_suspend_lock);
2582
2583 if (pio == NULL)
9babb374 2584 return (0);
34dc7c2f 2585
9babb374
BB
2586 zio_reexecute(pio);
2587 return (zio_wait(pio));
b128c09f
BB
2588}
2589
2590void
2591zio_resume_wait(spa_t *spa)
2592{
2593 mutex_enter(&spa->spa_suspend_lock);
2594 while (spa_suspended(spa))
2595 cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
2596 mutex_exit(&spa->spa_suspend_lock);
34dc7c2f
BB
2597}
2598
2599/*
2600 * ==========================================================================
b128c09f
BB
2601 * Gang blocks.
2602 *
2603 * A gang block is a collection of small blocks that looks to the DMU
2604 * like one large block. When zio_dva_allocate() cannot find a block
2605 * of the requested size, due to either severe fragmentation or the pool
2606 * being nearly full, it calls zio_write_gang_block() to construct the
2607 * block from smaller fragments.
2608 *
2609 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
2610 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
2611 * an indirect block: it's an array of block pointers. It consumes
2612 * only one sector and hence is allocatable regardless of fragmentation.
2613 * The gang header's bps point to its gang members, which hold the data.
2614 *
2615 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
2616 * as the verifier to ensure uniqueness of the SHA256 checksum.
2617 * Critically, the gang block bp's blk_cksum is the checksum of the data,
2618 * not the gang header. This ensures that data block signatures (needed for
2619 * deduplication) are independent of how the block is physically stored.
2620 *
2621 * Gang blocks can be nested: a gang member may itself be a gang block.
2622 * Thus every gang block is a tree in which root and all interior nodes are
2623 * gang headers, and the leaves are normal blocks that contain user data.
2624 * The root of the gang tree is called the gang leader.
2625 *
2626 * To perform any operation (read, rewrite, free, claim) on a gang block,
2627 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
2628 * in the io_gang_tree field of the original logical i/o by recursively
2629 * reading the gang leader and all gang headers below it. This yields
2630 * an in-core tree containing the contents of every gang header and the
2631 * bps for every constituent of the gang block.
2632 *
2633 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
2634 * and invokes a callback on each bp. To free a gang block, zio_gang_issue()
2635 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
2636 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
2637 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
2638 * headers, since we already have those in io_gang_tree. zio_rewrite_gang()
2639 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
2640 * of the gang header plus zio_checksum_compute() of the data to update the
2641 * gang header's blk_cksum as described above.
2642 *
2643 * The two-phase assemble/issue model solves the problem of partial failure --
2644 * what if you'd freed part of a gang block but then couldn't read the
2645 * gang header for another part? Assembling the entire gang tree first
2646 * ensures that all the necessary gang header I/O has succeeded before
2647 * starting the actual work of free, claim, or write. Once the gang tree
2648 * is assembled, free and claim are in-memory operations that cannot fail.
2649 *
2650 * In the event that a gang write fails, zio_dva_unallocate() walks the
2651 * gang tree to immediately free (i.e. insert back into the space map)
2652 * everything we've allocated. This ensures that we don't get ENOSPC
2653 * errors during repeated suspend/resume cycles due to a flaky device.
2654 *
2655 * Gang rewrites only happen during sync-to-convergence. If we can't assemble
2656 * the gang tree, we won't modify the block, so we can safely defer the free
2657 * (knowing that the block is still intact). If we *can* assemble the gang
2658 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
2659 * each constituent bp and we can allocate a new block on the next sync pass.
2660 *
2661 * In all cases, the gang tree allows complete recovery from partial failure.
34dc7c2f
BB
2662 * ==========================================================================
2663 */
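
A loose sketch of the in-core shape described above: each assembled node holds a gang header (an array of up to SPA_GBH_NBLKPTRS block pointers) plus per-slot child nodes, and operations walk it recursively the way zio_gang_tree_issue() does below. The struct and names here only approximate zio_gang_node_t.

/* Loose model of the assembled gang tree; names are illustrative. */
#define TOY_GBH_NBLKPTRS        3

typedef struct toy_gang_node {
        int     tn_nbps;                        /* non-hole bps in the header */
        struct toy_gang_node *tn_child[TOY_GBH_NBLKPTRS]; /* NULL for leaves */
} toy_gang_node_t;

/* Count the ordinary data blocks reachable from a gang tree. */
static int
toy_gang_count_leaves(const toy_gang_node_t *gn)
{
        int leaves = 0;

        for (int g = 0; g < gn->tn_nbps; g++) {
                if (gn->tn_child[g] != NULL)            /* nested gang block */
                        leaves += toy_gang_count_leaves(gn->tn_child[g]);
                else                                    /* ordinary data block */
                        leaves++;
        }
        return (leaves);
}
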
b128c09f 2664
a6255b7f
DQ
2665static void
2666zio_gang_issue_func_done(zio_t *zio)
2667{
e2af2acc 2668 abd_free(zio->io_abd);
a6255b7f
DQ
2669}
2670
b128c09f 2671static zio_t *
a6255b7f
DQ
2672zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2673 uint64_t offset)
34dc7c2f 2674{
b128c09f
BB
2675 if (gn != NULL)
2676 return (pio);
34dc7c2f 2677
a6255b7f
DQ
2678 return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
2679 BP_GET_PSIZE(bp), zio_gang_issue_func_done,
2680 NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
b128c09f
BB
2681 &pio->io_bookmark));
2682}
2683
a6255b7f
DQ
2684static zio_t *
2685zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2686 uint64_t offset)
b128c09f
BB
2687{
2688 zio_t *zio;
2689
2690 if (gn != NULL) {
a6255b7f
DQ
2691 abd_t *gbh_abd =
2692 abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
b128c09f 2693 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
a6255b7f
DQ
2694 gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
2695 pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
2696 &pio->io_bookmark);
34dc7c2f 2697 /*
b128c09f
BB
2698 * As we rewrite each gang header, the pipeline will compute
2699 * a new gang block header checksum for it; but no one will
2700 * compute a new data checksum, so we do that here. The one
2701 * exception is the gang leader: the pipeline already computed
2702 * its data checksum because that stage precedes gang assembly.
2703 * (Presently, nothing actually uses interior data checksums;
2704 * this is just good hygiene.)
34dc7c2f 2705 */
9babb374 2706 if (gn != pio->io_gang_leader->io_gang_tree) {
a6255b7f
DQ
2707 abd_t *buf = abd_get_offset(data, offset);
2708
b128c09f 2709 zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
a6255b7f
DQ
2710 buf, BP_GET_PSIZE(bp));
2711
e2af2acc 2712 abd_free(buf);
b128c09f 2713 }
428870ff
BB
2714 /*
2715 * If we are here to damage data for testing purposes,
2716 * leave the GBH alone so that we can detect the damage.
2717 */
2718 if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
2719 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
34dc7c2f 2720 } else {
b128c09f 2721 zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
a6255b7f
DQ
2722 abd_get_offset(data, offset), BP_GET_PSIZE(bp),
2723 zio_gang_issue_func_done, NULL, pio->io_priority,
b128c09f 2724 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
34dc7c2f
BB
2725 }
2726
b128c09f
BB
2727 return (zio);
2728}
34dc7c2f 2729
a6255b7f
DQ
2730static zio_t *
2731zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2732 uint64_t offset)
b128c09f 2733{
14e4e3cb
AZ
2734 (void) gn, (void) data, (void) offset;
2735
9cdf7b1f
MA
2736 zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
2737 ZIO_GANG_CHILD_FLAGS(pio));
2738 if (zio == NULL) {
2739 zio = zio_null(pio, pio->io_spa,
2740 NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
2741 }
2742 return (zio);
34dc7c2f
BB
2743}
2744
a6255b7f
DQ
2745static zio_t *
2746zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
2747 uint64_t offset)
34dc7c2f 2748{
14e4e3cb 2749 (void) gn, (void) data, (void) offset;
b128c09f
BB
2750 return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
2751 NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
2752}
2753
2754static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
2755 NULL,
2756 zio_read_gang,
2757 zio_rewrite_gang,
2758 zio_free_gang,
2759 zio_claim_gang,
2760 NULL
2761};
34dc7c2f 2762
b128c09f 2763static void zio_gang_tree_assemble_done(zio_t *zio);
34dc7c2f 2764
b128c09f
BB
2765static zio_gang_node_t *
2766zio_gang_node_alloc(zio_gang_node_t **gnpp)
2767{
2768 zio_gang_node_t *gn;
34dc7c2f 2769
b128c09f 2770 ASSERT(*gnpp == NULL);
34dc7c2f 2771
79c76d5b 2772 gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
b128c09f
BB
2773 gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
2774 *gnpp = gn;
34dc7c2f 2775
b128c09f 2776 return (gn);
34dc7c2f
BB
2777}
2778
34dc7c2f 2779static void
b128c09f 2780zio_gang_node_free(zio_gang_node_t **gnpp)
34dc7c2f 2781{
b128c09f 2782 zio_gang_node_t *gn = *gnpp;
34dc7c2f 2783
1c27024e 2784 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
b128c09f
BB
2785 ASSERT(gn->gn_child[g] == NULL);
2786
2787 zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
2788 kmem_free(gn, sizeof (*gn));
2789 *gnpp = NULL;
34dc7c2f
BB
2790}
2791
b128c09f
BB
2792static void
2793zio_gang_tree_free(zio_gang_node_t **gnpp)
34dc7c2f 2794{
b128c09f 2795 zio_gang_node_t *gn = *gnpp;
34dc7c2f 2796
b128c09f
BB
2797 if (gn == NULL)
2798 return;
34dc7c2f 2799
1c27024e 2800 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
b128c09f 2801 zio_gang_tree_free(&gn->gn_child[g]);
34dc7c2f 2802
b128c09f 2803 zio_gang_node_free(gnpp);
34dc7c2f
BB
2804}
2805
b128c09f 2806static void
9babb374 2807zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
34dc7c2f 2808{
b128c09f 2809 zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
a6255b7f 2810 abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
b128c09f 2811
9babb374 2812 ASSERT(gio->io_gang_leader == gio);
b128c09f 2813 ASSERT(BP_IS_GANG(bp));
34dc7c2f 2814
a6255b7f
DQ
2815 zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
2816 zio_gang_tree_assemble_done, gn, gio->io_priority,
2817 ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
b128c09f 2818}
34dc7c2f 2819
b128c09f
BB
2820static void
2821zio_gang_tree_assemble_done(zio_t *zio)
2822{
9babb374 2823 zio_t *gio = zio->io_gang_leader;
b128c09f
BB
2824 zio_gang_node_t *gn = zio->io_private;
2825 blkptr_t *bp = zio->io_bp;
34dc7c2f 2826
9babb374 2827 ASSERT(gio == zio_unique_parent(zio));
ccec7fbe 2828 ASSERT(list_is_empty(&zio->io_child_list));
34dc7c2f 2829
b128c09f
BB
2830 if (zio->io_error)
2831 return;
34dc7c2f 2832
a6255b7f 2833 /* this ABD was created from a linear buf in zio_gang_tree_assemble */
b128c09f 2834 if (BP_SHOULD_BYTESWAP(bp))
a6255b7f 2835 byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
34dc7c2f 2836
a6255b7f 2837 ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
b128c09f 2838 ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
428870ff 2839 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
34dc7c2f 2840
e2af2acc 2841 abd_free(zio->io_abd);
a6255b7f 2842
1c27024e 2843 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
b128c09f
BB
2844 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2845 if (!BP_IS_GANG(gbp))
2846 continue;
9babb374 2847 zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
b128c09f 2848 }
34dc7c2f
BB
2849}
2850
b128c09f 2851static void
a6255b7f
DQ
2852zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
2853 uint64_t offset)
34dc7c2f 2854{
9babb374 2855 zio_t *gio = pio->io_gang_leader;
b128c09f 2856 zio_t *zio;
34dc7c2f 2857
b128c09f 2858 ASSERT(BP_IS_GANG(bp) == !!gn);
9babb374
BB
2859 ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
2860 ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
34dc7c2f 2861
b128c09f
BB
2862 /*
2863 * If you're a gang header, your data is in gn->gn_gbh.
2864 * If you're a gang member, your data is in 'data' and gn == NULL.
2865 */
a6255b7f 2866 zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
34dc7c2f 2867
b128c09f 2868 if (gn != NULL) {
428870ff 2869 ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
34dc7c2f 2870
1c27024e 2871 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
b128c09f
BB
2872 blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
2873 if (BP_IS_HOLE(gbp))
2874 continue;
a6255b7f
DQ
2875 zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
2876 offset);
2877 offset += BP_GET_PSIZE(gbp);
b128c09f 2878 }
34dc7c2f
BB
2879 }
2880
9babb374 2881 if (gn == gio->io_gang_tree)
a6255b7f 2882 ASSERT3U(gio->io_size, ==, offset);
34dc7c2f 2883
b128c09f
BB
2884 if (zio != pio)
2885 zio_nowait(zio);
34dc7c2f
BB
2886}
2887
62840030 2888static zio_t *
b128c09f 2889zio_gang_assemble(zio_t *zio)
34dc7c2f 2890{
b128c09f 2891 blkptr_t *bp = zio->io_bp;
34dc7c2f 2892
9babb374
BB
2893 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
2894 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
2895
2896 zio->io_gang_leader = zio;
34dc7c2f 2897
b128c09f 2898 zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
34dc7c2f 2899
62840030 2900 return (zio);
34dc7c2f
BB
2901}
2902
62840030 2903static zio_t *
b128c09f 2904zio_gang_issue(zio_t *zio)
34dc7c2f 2905{
b128c09f 2906 blkptr_t *bp = zio->io_bp;
34dc7c2f 2907
ddc751d5 2908 if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
62840030 2909 return (NULL);
ddc751d5 2910 }
34dc7c2f 2911
9babb374
BB
2912 ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
2913 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
34dc7c2f 2914
b128c09f 2915 if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
a6255b7f
DQ
2916 zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
2917 0);
b128c09f 2918 else
9babb374 2919 zio_gang_tree_free(&zio->io_gang_tree);
34dc7c2f 2920
b128c09f 2921 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
34dc7c2f 2922
62840030 2923 return (zio);
34dc7c2f
BB
2924}
2925
3bd4df38
EN
2926static void
2927zio_gang_inherit_allocator(zio_t *pio, zio_t *cio)
2928{
2929 cio->io_allocator = pio->io_allocator;
3bd4df38
EN
2930}
2931
34dc7c2f 2932static void
b128c09f 2933zio_write_gang_member_ready(zio_t *zio)
34dc7c2f 2934{
d164b209 2935 zio_t *pio = zio_unique_parent(zio);
34dc7c2f
BB
2936 dva_t *cdva = zio->io_bp->blk_dva;
2937 dva_t *pdva = pio->io_bp->blk_dva;
2938 uint64_t asize;
2a8ba608 2939 zio_t *gio __maybe_unused = zio->io_gang_leader;
34dc7c2f 2940
b128c09f
BB
2941 if (BP_IS_HOLE(zio->io_bp))
2942 return;
2943
2944 ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
2945
2946 ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
428870ff
BB
2947 ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
2948 ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
2949 ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
14872aaa 2950 VERIFY3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
34dc7c2f
BB
2951
2952 mutex_enter(&pio->io_lock);
1c27024e 2953 for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
34dc7c2f
BB
2954 ASSERT(DVA_GET_GANG(&pdva[d]));
2955 asize = DVA_GET_ASIZE(&pdva[d]);
2956 asize += DVA_GET_ASIZE(&cdva[d]);
2957 DVA_SET_ASIZE(&pdva[d], asize);
2958 }
2959 mutex_exit(&pio->io_lock);
2960}
2961
a6255b7f
DQ
2962static void
2963zio_write_gang_done(zio_t *zio)
2964{
c955398b
BL
2965 /*
2966 * The io_abd field will be NULL for a zio with no data. The io_flags
2967 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
2968 * check for it here as it is cleared in zio_ready.
2969 */
2970 if (zio->io_abd != NULL)
e2af2acc 2971 abd_free(zio->io_abd);
a6255b7f
DQ
2972}
2973
62840030 2974static zio_t *
aa755b35 2975zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
34dc7c2f 2976{
b128c09f
BB
2977 spa_t *spa = pio->io_spa;
2978 blkptr_t *bp = pio->io_bp;
9babb374 2979 zio_t *gio = pio->io_gang_leader;
b128c09f
BB
2980 zio_t *zio;
2981 zio_gang_node_t *gn, **gnpp;
34dc7c2f 2982 zio_gbh_phys_t *gbh;
a6255b7f 2983 abd_t *gbh_abd;
b128c09f
BB
2984 uint64_t txg = pio->io_txg;
2985 uint64_t resid = pio->io_size;
2986 uint64_t lsize;
428870ff 2987 int copies = gio->io_prop.zp_copies;
b128c09f 2988 zio_prop_t zp;
1c27024e 2989 int error;
c955398b 2990 boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
b5256303
TC
2991
2992 /*
14872aaa
MA
2993 * If one copy was requested, store 2 copies of the GBH, so that we
2994 * can still traverse all the data (e.g. to free or scrub) even if a
2995 * block is damaged. Note that we can't store 3 copies of the GBH in
2996 * all cases, e.g. with encryption, which uses DVA[2] for the IV+salt.
b5256303 2997 */
14872aaa
MA
2998 int gbh_copies = copies;
2999 if (gbh_copies == 1) {
3000 gbh_copies = MIN(2, spa_max_replication(spa));
3001 }
b5256303 3002
3bd4df38 3003 ASSERT(ZIO_HAS_ALLOCATOR(pio));
1c27024e 3004 int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
3dfb57a3
DB
3005 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3006 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
c955398b 3007 ASSERT(has_data);
3dfb57a3
DB
3008
3009 flags |= METASLAB_ASYNC_ALLOC;
f8020c93
AM
3010 VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
3011 mca_alloc_slots, pio));
3dfb57a3
DB
3012
3013 /*
3014 * The logical zio has already placed a reservation for
3015 * 'copies' allocation slots but gang blocks may require
3016 * additional copies. These additional copies
3017 * (i.e. gbh_copies - copies) are guaranteed to succeed
3018 * since metaslab_class_throttle_reserve() always allows
3019 * additional reservations for gang blocks.
3020 */
3021 VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
492f64e9 3022 pio->io_allocator, pio, flags));
3dfb57a3
DB
3023 }
3024
3025 error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
4e21fd06 3026 bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
492f64e9 3027 &pio->io_alloc_list, pio, pio->io_allocator);
34dc7c2f 3028 if (error) {
3dfb57a3
DB
3029 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3030 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
c955398b 3031 ASSERT(has_data);
3dfb57a3
DB
3032
3033 /*
3034 * If we failed to allocate the gang block header then
3035 * we remove any additional allocation reservations that
3036 * we placed here. The original reservation will
3037 * be removed when the logical I/O goes to the ready
3038 * stage.
3039 */
3040 metaslab_class_throttle_unreserve(mc,
492f64e9 3041 gbh_copies - copies, pio->io_allocator, pio);
3dfb57a3
DB
3042 }
3043
b128c09f 3044 pio->io_error = error;
62840030 3045 return (pio);
34dc7c2f
BB
3046 }
3047
9babb374
BB
3048 if (pio == gio) {
3049 gnpp = &gio->io_gang_tree;
b128c09f
BB
3050 } else {
3051 gnpp = pio->io_private;
3052 ASSERT(pio->io_ready == zio_write_gang_member_ready);
34dc7c2f
BB
3053 }
3054
b128c09f
BB
3055 gn = zio_gang_node_alloc(gnpp);
3056 gbh = gn->gn_gbh;
861166b0 3057 memset(gbh, 0, SPA_GANGBLOCKSIZE);
a6255b7f 3058 gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
34dc7c2f 3059
b128c09f
BB
3060 /*
3061 * Create the gang header.
3062 */
a6255b7f
DQ
3063 zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
3064 zio_write_gang_done, NULL, pio->io_priority,
3065 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
34dc7c2f 3066
3bd4df38
EN
3067 zio_gang_inherit_allocator(pio, zio);
3068
b128c09f
BB
3069 /*
3070 * Create and nowait the gang children.
3071 */
1c27024e 3072 for (int g = 0; resid != 0; resid -= lsize, g++) {
b128c09f
BB
3073 lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
3074 SPA_MINBLOCKSIZE);
3075 ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
3076
9babb374 3077 zp.zp_checksum = gio->io_prop.zp_checksum;
b128c09f 3078 zp.zp_compress = ZIO_COMPRESS_OFF;
10b3c7f5 3079 zp.zp_complevel = gio->io_prop.zp_complevel;
b128c09f
BB
3080 zp.zp_type = DMU_OT_NONE;
3081 zp.zp_level = 0;
428870ff 3082 zp.zp_copies = gio->io_prop.zp_copies;
03c6040b
GW
3083 zp.zp_dedup = B_FALSE;
3084 zp.zp_dedup_verify = B_FALSE;
3085 zp.zp_nopwrite = B_FALSE;
4807c0ba
TC
3086 zp.zp_encrypt = gio->io_prop.zp_encrypt;
3087 zp.zp_byteorder = gio->io_prop.zp_byteorder;
861166b0
AZ
3088 memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
3089 memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
3090 memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);
b128c09f 3091
1c27024e 3092 zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
c955398b
BL
3093 has_data ? abd_get_offset(pio->io_abd, pio->io_size -
3094 resid) : NULL, lsize, lsize, &zp,
ccec7fbe 3095 zio_write_gang_member_ready, NULL,
a6255b7f 3096 zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
3dfb57a3
DB
3097 ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
3098
3bd4df38
EN
3099 zio_gang_inherit_allocator(zio, cio);
3100
3dfb57a3
DB
3101 if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
3102 ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
c955398b 3103 ASSERT(has_data);
3dfb57a3
DB
3104
3105 /*
3106 * Gang children won't throttle but we should
3107 * account for their work, so reserve an allocation
3108 * slot for them here.
3109 */
3110 VERIFY(metaslab_class_throttle_reserve(mc,
492f64e9 3111 zp.zp_copies, cio->io_allocator, cio, flags));
3dfb57a3
DB
3112 }
3113 zio_nowait(cio);
b128c09f 3114 }
34dc7c2f
BB
3115
3116 /*
b128c09f 3117 * Set pio's pipeline to just wait for zio to finish.
34dc7c2f 3118 */
b128c09f
BB
3119 pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3120
3121 zio_nowait(zio);
3122
62840030 3123 return (pio);
34dc7c2f
BB
3124}
3125
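The sizing loop above splits the remaining residual evenly across the unused gang-header block pointers and rounds each child up to the minimum block size. A minimal userspace sketch of that arithmetic, with local stand-in constants for SPA_GBH_NBLKPTRS and SPA_MINBLOCKSIZE (not the kernel definitions), for a 300 KiB residual:

/*
 * Standalone sketch of the gang-child sizing loop in
 * zio_write_gang_block(). Because resid is a multiple of the minimum
 * block size, each child size stays within the residual, matching the
 * ASSERT in the kernel loop.
 */
#include <stdint.h>
#include <stdio.h>

#define	GBH_NBLKPTRS	3	/* stand-in for SPA_GBH_NBLKPTRS */
#define	MINBLOCKSIZE	512	/* stand-in for SPA_MINBLOCKSIZE */

/* round x up to the next multiple of align (align > 0) */
static uint64_t
roundup_to(uint64_t x, uint64_t align)
{
	return (((x + align - 1) / align) * align);
}

int
main(void)
{
	uint64_t resid = 300 * 1024;	/* bytes still to be placed */

	for (int g = 0; resid != 0; g++) {
		uint64_t lsize = roundup_to(resid / (GBH_NBLKPTRS - g),
		    MINBLOCKSIZE);
		printf("gang child %d: %llu bytes\n", g,
		    (unsigned long long)lsize);	/* 102400 each */
		resid -= lsize;
	}
	return (0);
}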
03c6040b 3126/*
3c67d83a
TH
3127 * The zio_nop_write stage in the pipeline determines if allocating a
3128 * new bp is necessary. The nopwrite feature can handle writes in
3129 * either syncing or open context (i.e. zil writes) and as a result is
3130 * mutually exclusive with dedup.
3131 *
3132 * By leveraging a cryptographically secure checksum, such as SHA256, we
3133 * can compare the checksums of the new data and the old to determine if
3134 * allocating a new block is required. Note that our requirements for
3135 * cryptographic strength are fairly weak: there can't be any accidental
3136 * hash collisions, but we don't need to be secure against intentional
3137 * (malicious) collisions. To trigger a nopwrite, you have to be able
3138 * to write the file to begin with, and triggering an incorrect (hash
3139 * collision) nopwrite is no worse than simply writing to the file.
3140 * That said, there are no known attacks against the checksum algorithms
3141 * used for nopwrite, assuming that the salt and the checksums
3142 * themselves remain secret.
03c6040b 3143 */
62840030 3144static zio_t *
03c6040b
GW
3145zio_nop_write(zio_t *zio)
3146{
3147 blkptr_t *bp = zio->io_bp;
3148 blkptr_t *bp_orig = &zio->io_bp_orig;
3149 zio_prop_t *zp = &zio->io_prop;
3150
d7cf06a2 3151 ASSERT(BP_IS_HOLE(bp));
03c6040b
GW
3152 ASSERT(BP_GET_LEVEL(bp) == 0);
3153 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
3154 ASSERT(zp->zp_nopwrite);
3155 ASSERT(!zp->zp_dedup);
3156 ASSERT(zio->io_bp_override == NULL);
3157 ASSERT(IO_IS_ALLOCATING(zio));
3158
3159 /*
3160 * Check to see if the original bp and the new bp have matching
3161 * characteristics (i.e. same checksum, compression algorithms, etc).
3162 * If they don't then just continue with the pipeline which will
3163 * allocate a new bp.
3164 */
3165 if (BP_IS_HOLE(bp_orig) ||
3c67d83a
TH
3166 !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
3167 ZCHECKSUM_FLAG_NOPWRITE) ||
b5256303 3168 BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
03c6040b
GW
3169 BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
3170 BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
3171 BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
3172 zp->zp_copies != BP_GET_NDVAS(bp_orig))
62840030 3173 return (zio);
03c6040b
GW
3174
3175 /*
3176 * If the checksums match then reset the pipeline so that we
3177 * avoid allocating a new bp and issuing any I/O.
3178 */
3179 if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
3c67d83a
TH
3180 ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
3181 ZCHECKSUM_FLAG_NOPWRITE);
03c6040b
GW
3182 ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
3183 ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
3184 ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
d7cf06a2 3185 ASSERT3U(bp->blk_prop, ==, bp_orig->blk_prop);
03c6040b 3186
681a85cb
GW
3187 /*
3188 * If we're overwriting a block that is currently on an
3189 * indirect vdev, then ignore the nopwrite request and
3190 * allow a new block to be allocated on a concrete vdev.
3191 */
3192 spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
d7cf06a2
GW
3193 for (int d = 0; d < BP_GET_NDVAS(bp_orig); d++) {
3194 vdev_t *tvd = vdev_lookup_top(zio->io_spa,
3195 DVA_GET_VDEV(&bp_orig->blk_dva[d]));
3196 if (tvd->vdev_ops == &vdev_indirect_ops) {
3197 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3198 return (zio);
3199 }
681a85cb
GW
3200 }
3201 spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
3202
03c6040b
GW
3203 *bp = *bp_orig;
3204 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3205 zio->io_flags |= ZIO_FLAG_NOPWRITE;
3206 }
3207
62840030 3208 return (zio);
03c6040b
GW
3209}
3210
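A distilled sketch of the nopwrite eligibility test described in the comment above, using a simplified stand-in struct rather than the kernel blkptr_t; fake_bp and nopwrite_possible() are hypothetical names for illustration only:

/*
 * The rewrite is skipped only when the old and new blocks agree on
 * checksum/compression/dedup settings and the strong checksums match.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct fake_bp {
	uint8_t		checksum_alg;	/* e.g. SHA256 */
	uint8_t		compress_alg;
	bool		dedup;
	int		ndvas;		/* number of copies on disk */
	uint64_t	cksum[4];	/* 256-bit checksum words */
};

static bool
nopwrite_possible(const struct fake_bp *obp, const struct fake_bp *nbp,
    int copies, bool alg_is_nopwrite_capable)
{
	if (!alg_is_nopwrite_capable ||
	    obp->checksum_alg != nbp->checksum_alg ||
	    obp->compress_alg != nbp->compress_alg ||
	    obp->dedup != nbp->dedup ||
	    copies != obp->ndvas)
		return (false);

	/* identical strong checksums => the data is already on disk */
	return (memcmp(obp->cksum, nbp->cksum, sizeof (obp->cksum)) == 0);
}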
67a1b037
PJD
3211/*
3212 * ==========================================================================
3213 * Block Reference Table
3214 * ==========================================================================
3215 */
3216static zio_t *
3217zio_brt_free(zio_t *zio)
3218{
3219 blkptr_t *bp;
3220
3221 bp = zio->io_bp;
3222
3223 if (BP_GET_LEVEL(bp) > 0 ||
3224 BP_IS_METADATA(bp) ||
3225 !brt_maybe_exists(zio->io_spa, bp)) {
3226 return (zio);
3227 }
3228
3229 if (!brt_entry_decref(zio->io_spa, bp)) {
3230 /*
3231 * This isn't the last reference, so we cannot free
3232 * the data yet.
3233 */
3234 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
3235 }
3236
3237 return (zio);
3238}
3239
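A minimal sketch of the "free only on the last reference" pattern zio_brt_free() implements, with a plain array standing in for the Block Reference Table; brt_refs and brt_decref_may_free() are illustrative, not ZFS APIs:

#include <stdbool.h>
#include <stdint.h>

static uint64_t brt_refs[64];	/* hypothetical per-block clone refcounts */

/* Returns true when the caller may actually release the block's space. */
static bool
brt_decref_may_free(unsigned block)
{
	if (brt_refs[block] == 0)
		return (true);			/* never cloned: free now */
	return (--brt_refs[block] == 0);	/* last clone reference gone */
}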
34dc7c2f
BB
3240/*
3241 * ==========================================================================
428870ff 3242 * Dedup
34dc7c2f
BB
3243 * ==========================================================================
3244 */
428870ff
BB
3245static void
3246zio_ddt_child_read_done(zio_t *zio)
3247{
3248 blkptr_t *bp = zio->io_bp;
3249 ddt_entry_t *dde = zio->io_private;
3250 ddt_phys_t *ddp;
3251 zio_t *pio = zio_unique_parent(zio);
3252
3253 mutex_enter(&pio->io_lock);
3254 ddp = ddt_phys_select(dde, bp);
3255 if (zio->io_error == 0)
3256 ddt_phys_clear(ddp); /* this ddp doesn't need repair */
a6255b7f
DQ
3257
3258 if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
3259 dde->dde_repair_abd = zio->io_abd;
428870ff 3260 else
a6255b7f 3261 abd_free(zio->io_abd);
428870ff
BB
3262 mutex_exit(&pio->io_lock);
3263}
3264
62840030 3265static zio_t *
428870ff
BB
3266zio_ddt_read_start(zio_t *zio)
3267{
3268 blkptr_t *bp = zio->io_bp;
3269
3270 ASSERT(BP_GET_DEDUP(bp));
3271 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3272 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3273
3274 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3275 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3276 ddt_entry_t *dde = ddt_repair_start(ddt, bp);
3277 ddt_phys_t *ddp = dde->dde_phys;
3278 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
3279 blkptr_t blk;
3280
3281 ASSERT(zio->io_vsd == NULL);
3282 zio->io_vsd = dde;
3283
3284 if (ddp_self == NULL)
62840030 3285 return (zio);
428870ff 3286
1c27024e 3287 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
428870ff
BB
3288 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
3289 continue;
3290 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
3291 &blk);
3292 zio_nowait(zio_read(zio, zio->io_spa, &blk,
a6255b7f
DQ
3293 abd_alloc_for_io(zio->io_size, B_TRUE),
3294 zio->io_size, zio_ddt_child_read_done, dde,
3295 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
3296 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
428870ff 3297 }
62840030 3298 return (zio);
428870ff
BB
3299 }
3300
3301 zio_nowait(zio_read(zio, zio->io_spa, bp,
a6255b7f 3302 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
428870ff
BB
3303 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
3304
62840030 3305 return (zio);
428870ff
BB
3306}
3307
62840030 3308static zio_t *
428870ff
BB
3309zio_ddt_read_done(zio_t *zio)
3310{
3311 blkptr_t *bp = zio->io_bp;
3312
ddc751d5 3313 if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
62840030 3314 return (NULL);
ddc751d5 3315 }
428870ff
BB
3316
3317 ASSERT(BP_GET_DEDUP(bp));
3318 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3319 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3320
3321 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3322 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3323 ddt_entry_t *dde = zio->io_vsd;
3324 if (ddt == NULL) {
3325 ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
62840030 3326 return (zio);
428870ff
BB
3327 }
3328 if (dde == NULL) {
3329 zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
3330 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
62840030 3331 return (NULL);
428870ff 3332 }
a6255b7f
DQ
3333 if (dde->dde_repair_abd != NULL) {
3334 abd_copy(zio->io_abd, dde->dde_repair_abd,
3335 zio->io_size);
428870ff
BB
3336 zio->io_child_error[ZIO_CHILD_DDT] = 0;
3337 }
3338 ddt_repair_done(ddt, dde);
3339 zio->io_vsd = NULL;
3340 }
3341
3342 ASSERT(zio->io_vsd == NULL);
3343
62840030 3344 return (zio);
428870ff
BB
3345}
3346
3347static boolean_t
3348zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
3349{
3350 spa_t *spa = zio->io_spa;
c17bcf83 3351 boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
428870ff 3352
c17bcf83 3353 ASSERT(!(zio->io_bp_override && do_raw));
2aa34383 3354
428870ff
BB
3355 /*
3356 * Note: we compare the original data, not the transformed data,
3357 * because when zio->io_bp is an override bp, we will not have
3358 * pushed the I/O transforms. That's an important optimization
3359 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
c17bcf83 3360 * However, we should never get a raw, override zio so in these
b5256303 3361 * cases we can compare the io_abd directly. This is useful because
c17bcf83
TC
3362 * it allows us to do dedup verification even if we don't have access
3363 * to the original data (for instance, if the encryption keys aren't
3364 * loaded).
428870ff 3365 */
c17bcf83 3366
1c27024e 3367 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
428870ff
BB
3368 zio_t *lio = dde->dde_lead_zio[p];
3369
c17bcf83
TC
3370 if (lio != NULL && do_raw) {
3371 return (lio->io_size != zio->io_size ||
a6255b7f 3372 abd_cmp(zio->io_abd, lio->io_abd) != 0);
c17bcf83 3373 } else if (lio != NULL) {
428870ff 3374 return (lio->io_orig_size != zio->io_orig_size ||
a6255b7f 3375 abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
428870ff
BB
3376 }
3377 }
3378
1c27024e 3379 for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
428870ff
BB
3380 ddt_phys_t *ddp = &dde->dde_phys[p];
3381
c17bcf83
TC
3382 if (ddp->ddp_phys_birth != 0 && do_raw) {
3383 blkptr_t blk = *zio->io_bp;
3384 uint64_t psize;
a6255b7f 3385 abd_t *tmpabd;
c17bcf83
TC
3386 int error;
3387
3388 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
3389 psize = BP_GET_PSIZE(&blk);
3390
3391 if (psize != zio->io_size)
3392 return (B_TRUE);
3393
3394 ddt_exit(ddt);
3395
a6255b7f 3396 tmpabd = abd_alloc_for_io(psize, B_TRUE);
c17bcf83 3397
a6255b7f 3398 error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
c17bcf83
TC
3399 psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
3400 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
3401 ZIO_FLAG_RAW, &zio->io_bookmark));
3402
3403 if (error == 0) {
a6255b7f 3404 if (abd_cmp(tmpabd, zio->io_abd) != 0)
c17bcf83
TC
3405 error = SET_ERROR(ENOENT);
3406 }
3407
a6255b7f 3408 abd_free(tmpabd);
c17bcf83
TC
3409 ddt_enter(ddt);
3410 return (error != 0);
3411 } else if (ddp->ddp_phys_birth != 0) {
428870ff 3412 arc_buf_t *abuf = NULL;
2a432414 3413 arc_flags_t aflags = ARC_FLAG_WAIT;
428870ff
BB
3414 blkptr_t blk = *zio->io_bp;
3415 int error;
3416
3417 ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
3418
c17bcf83
TC
3419 if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
3420 return (B_TRUE);
3421
428870ff
BB
3422 ddt_exit(ddt);
3423
294f6806 3424 error = arc_read(NULL, spa, &blk,
428870ff
BB
3425 arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
3426 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3427 &aflags, &zio->io_bookmark);
3428
3429 if (error == 0) {
a6255b7f 3430 if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
428870ff 3431 zio->io_orig_size) != 0)
c17bcf83 3432 error = SET_ERROR(ENOENT);
d3c2ae1c 3433 arc_buf_destroy(abuf, &abuf);
428870ff
BB
3434 }
3435
3436 ddt_enter(ddt);
3437 return (error != 0);
3438 }
3439 }
3440
3441 return (B_FALSE);
3442}
3443
3444static void
3445zio_ddt_child_write_ready(zio_t *zio)
3446{
3447 int p = zio->io_prop.zp_copies;
3448 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3449 ddt_entry_t *dde = zio->io_private;
3450 ddt_phys_t *ddp = &dde->dde_phys[p];
3451 zio_t *pio;
3452
3453 if (zio->io_error)
3454 return;
3455
3456 ddt_enter(ddt);
3457
3458 ASSERT(dde->dde_lead_zio[p] == zio);
3459
3460 ddt_phys_fill(ddp, zio->io_bp);
3461
1c27024e 3462 zio_link_t *zl = NULL;
3dfb57a3 3463 while ((pio = zio_walk_parents(zio, &zl)) != NULL)
428870ff
BB
3464 ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
3465
3466 ddt_exit(ddt);
3467}
3468
3469static void
3470zio_ddt_child_write_done(zio_t *zio)
3471{
3472 int p = zio->io_prop.zp_copies;
3473 ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
3474 ddt_entry_t *dde = zio->io_private;
3475 ddt_phys_t *ddp = &dde->dde_phys[p];
3476
3477 ddt_enter(ddt);
3478
3479 ASSERT(ddp->ddp_refcnt == 0);
3480 ASSERT(dde->dde_lead_zio[p] == zio);
3481 dde->dde_lead_zio[p] = NULL;
3482
3483 if (zio->io_error == 0) {
3dfb57a3
DB
3484 zio_link_t *zl = NULL;
3485 while (zio_walk_parents(zio, &zl) != NULL)
428870ff
BB
3486 ddt_phys_addref(ddp);
3487 } else {
3488 ddt_phys_clear(ddp);
3489 }
3490
3491 ddt_exit(ddt);
3492}
3493
62840030 3494static zio_t *
428870ff
BB
3495zio_ddt_write(zio_t *zio)
3496{
3497 spa_t *spa = zio->io_spa;
3498 blkptr_t *bp = zio->io_bp;
3499 uint64_t txg = zio->io_txg;
3500 zio_prop_t *zp = &zio->io_prop;
3501 int p = zp->zp_copies;
428870ff 3502 zio_t *cio = NULL;
428870ff
BB
3503 ddt_t *ddt = ddt_select(spa, bp);
3504 ddt_entry_t *dde;
3505 ddt_phys_t *ddp;
3506
3507 ASSERT(BP_GET_DEDUP(bp));
3508 ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
3509 ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
c17bcf83 3510 ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
428870ff
BB
3511
3512 ddt_enter(ddt);
3513 dde = ddt_lookup(ddt, bp, B_TRUE);
3514 ddp = &dde->dde_phys[p];
3515
3516 if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
3517 /*
3518 * If we're using a weak checksum, upgrade to a strong checksum
3519 * and try again. If we're already using a strong checksum,
3520 * we can't resolve it, so just convert to an ordinary write.
3521 * (And automatically e-mail a paper to Nature?)
3522 */
3c67d83a
TH
3523 if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
3524 ZCHECKSUM_FLAG_DEDUP)) {
428870ff
BB
3525 zp->zp_checksum = spa_dedup_checksum(spa);
3526 zio_pop_transforms(zio);
3527 zio->io_stage = ZIO_STAGE_OPEN;
3528 BP_ZERO(bp);
3529 } else {
03c6040b 3530 zp->zp_dedup = B_FALSE;
accd6d9d 3531 BP_SET_DEDUP(bp, B_FALSE);
428870ff 3532 }
accd6d9d 3533 ASSERT(!BP_GET_DEDUP(bp));
428870ff
BB
3534 zio->io_pipeline = ZIO_WRITE_PIPELINE;
3535 ddt_exit(ddt);
62840030 3536 return (zio);
428870ff
BB
3537 }
3538
428870ff
BB
3539 if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
3540 if (ddp->ddp_phys_birth != 0)
3541 ddt_bp_fill(ddp, bp, txg);
3542 if (dde->dde_lead_zio[p] != NULL)
3543 zio_add_child(zio, dde->dde_lead_zio[p]);
3544 else
3545 ddt_phys_addref(ddp);
3546 } else if (zio->io_bp_override) {
493fcce9 3547 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == txg);
428870ff
BB
3548 ASSERT(BP_EQUAL(bp, zio->io_bp_override));
3549 ddt_phys_fill(ddp, bp);
3550 ddt_phys_addref(ddp);
3551 } else {
a6255b7f 3552 cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
2aa34383 3553 zio->io_orig_size, zio->io_orig_size, zp,
ccec7fbe 3554 zio_ddt_child_write_ready, NULL,
428870ff
BB
3555 zio_ddt_child_write_done, dde, zio->io_priority,
3556 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
3557
a6255b7f 3558 zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
428870ff
BB
3559 dde->dde_lead_zio[p] = cio;
3560 }
3561
3562 ddt_exit(ddt);
3563
9cdf7b1f 3564 zio_nowait(cio);
428870ff 3565
62840030 3566 return (zio);
428870ff
BB
3567}
3568
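A sketch of the main decision zio_ddt_write() makes, reduced to two outcomes (the io_bp_override branch is folded into "reuse" for brevity); the types are illustrative stand-ins, not the kernel DDT structures:

#include <stdbool.h>

typedef enum {
	DDT_WRITE_REUSE,	/* block already written or in flight: addref */
	DDT_WRITE_LEAD		/* first writer: issue the child write */
} ddt_write_action_t;

struct fake_dde {
	bool	phys_written;	/* a copy with this checksum is on disk */
	bool	lead_in_flight;	/* another zio is writing it right now */
};

static ddt_write_action_t
ddt_write_action(const struct fake_dde *dde)
{
	if (dde->phys_written || dde->lead_in_flight)
		return (DDT_WRITE_REUSE);
	return (DDT_WRITE_LEAD);
}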
27218a32 3569static ddt_entry_t *freedde; /* for debugging */
b128c09f 3570
62840030 3571static zio_t *
428870ff
BB
3572zio_ddt_free(zio_t *zio)
3573{
3574 spa_t *spa = zio->io_spa;
3575 blkptr_t *bp = zio->io_bp;
3576 ddt_t *ddt = ddt_select(spa, bp);
3577 ddt_entry_t *dde;
3578 ddt_phys_t *ddp;
3579
3580 ASSERT(BP_GET_DEDUP(bp));
3581 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3582
3583 ddt_enter(ddt);
3584 freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
5dc6af0e
BB
3585 if (dde) {
3586 ddp = ddt_phys_select(dde, bp);
3587 if (ddp)
3588 ddt_phys_decref(ddp);
3589 }
428870ff
BB
3590 ddt_exit(ddt);
3591
62840030 3592 return (zio);
428870ff
BB
3593}
3594
3595/*
3596 * ==========================================================================
3597 * Allocate and free blocks
3598 * ==========================================================================
3599 */
3dfb57a3
DB
3600
3601static zio_t *
492f64e9 3602zio_io_to_allocate(spa_t *spa, int allocator)
3dfb57a3
DB
3603{
3604 zio_t *zio;
3605
1b50749c 3606 ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
3dfb57a3 3607
1b50749c 3608 zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
3dfb57a3
DB
3609 if (zio == NULL)
3610 return (NULL);
3611
3612 ASSERT(IO_IS_ALLOCATING(zio));
3bd4df38 3613 ASSERT(ZIO_HAS_ALLOCATOR(zio));
3dfb57a3
DB
3614
3615 /*
3616 * Try to place a reservation for this zio. If we're unable to
3617 * reserve then we throttle.
3618 */
492f64e9 3619 ASSERT3U(zio->io_allocator, ==, allocator);
cc99f275 3620 if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
1b50749c 3621 zio->io_prop.zp_copies, allocator, zio, 0)) {
3dfb57a3
DB
3622 return (NULL);
3623 }
3624
1b50749c 3625 avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
3dfb57a3
DB
3626 ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
3627
3628 return (zio);
3629}
3630
62840030 3631static zio_t *
3dfb57a3
DB
3632zio_dva_throttle(zio_t *zio)
3633{
3634 spa_t *spa = zio->io_spa;
3635 zio_t *nio;
cc99f275
DB
3636 metaslab_class_t *mc;
3637
3638 /* locate an appropriate allocation class */
3639 mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type,
3640 zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk);
3dfb57a3
DB
3641
3642 if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
cc99f275 3643 !mc->mc_alloc_throttle_enabled ||
3dfb57a3
DB
3644 zio->io_child_type == ZIO_CHILD_GANG ||
3645 zio->io_flags & ZIO_FLAG_NODATA) {
62840030 3646 return (zio);
3dfb57a3
DB
3647 }
3648
1b50749c 3649 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
3bd4df38 3650 ASSERT(ZIO_HAS_ALLOCATOR(zio));
3dfb57a3 3651 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3dfb57a3
DB
3652 ASSERT3U(zio->io_queued_timestamp, >, 0);
3653 ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
3654
3bd4df38 3655 int allocator = zio->io_allocator;
cc99f275 3656 zio->io_metaslab_class = mc;
1b50749c
AM
3657 mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
3658 avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
3659 nio = zio_io_to_allocate(spa, allocator);
3660 mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
62840030 3661 return (nio);
3dfb57a3
DB
3662}
3663
cc99f275 3664static void
492f64e9 3665zio_allocate_dispatch(spa_t *spa, int allocator)
3dfb57a3
DB
3666{
3667 zio_t *zio;
3668
1b50749c 3669 mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
492f64e9 3670 zio = zio_io_to_allocate(spa, allocator);
1b50749c 3671 mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
3dfb57a3
DB
3672 if (zio == NULL)
3673 return;
3674
3675 ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
3676 ASSERT0(zio->io_error);
3677 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
3678}
3679
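A userspace sketch of the throttle pattern implemented by zio_dva_throttle(), zio_io_to_allocate() and zio_allocate_dispatch(): try to reserve a slot, queue the request if that fails, and re-try the head of the queue whenever a slot is released. The struct throttle counters are stand-ins for the per-allocator AVL tree and reservation refcounts:

#include <stdbool.h>

struct throttle {
	int	slots_used;
	int	slots_max;
	int	queued;		/* stand-in for spa_allocs[].spaa_tree */
};

static bool
throttle_reserve(struct throttle *t)
{
	if (t->slots_used >= t->slots_max)
		return (false);
	t->slots_used++;
	return (true);
}

/* Called when an allocating I/O finishes (cf. zio_allocate_dispatch()). */
static void
throttle_unreserve_and_dispatch(struct throttle *t)
{
	t->slots_used--;
	if (t->queued > 0 && throttle_reserve(t)) {
		t->queued--;
		/* hand the dequeued request back to an issue thread here */
	}
}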
62840030 3680static zio_t *
34dc7c2f
BB
3681zio_dva_allocate(zio_t *zio)
3682{
3683 spa_t *spa = zio->io_spa;
cc99f275 3684 metaslab_class_t *mc;
34dc7c2f
BB
3685 blkptr_t *bp = zio->io_bp;
3686 int error;
6d974228 3687 int flags = 0;
34dc7c2f 3688
9babb374
BB
3689 if (zio->io_gang_leader == NULL) {
3690 ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
3691 zio->io_gang_leader = zio;
3692 }
3693
34dc7c2f 3694 ASSERT(BP_IS_HOLE(bp));
c99c9001 3695 ASSERT0(BP_GET_NDVAS(bp));
428870ff
BB
3696 ASSERT3U(zio->io_prop.zp_copies, >, 0);
3697 ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
34dc7c2f
BB
3698 ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
3699
3dfb57a3
DB
3700 if (zio->io_flags & ZIO_FLAG_NODATA)
3701 flags |= METASLAB_DONT_THROTTLE;
3702 if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
3703 flags |= METASLAB_GANG_CHILD;
3704 if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
3705 flags |= METASLAB_ASYNC_ALLOC;
3706
cc99f275
DB
3707 /*
3708 * if not already chosen, locate an appropriate allocation class
3709 */
3710 mc = zio->io_metaslab_class;
3711 if (mc == NULL) {
3712 mc = spa_preferred_class(spa, zio->io_size,
3713 zio->io_prop.zp_type, zio->io_prop.zp_level,
3714 zio->io_prop.zp_zpl_smallblk);
3715 zio->io_metaslab_class = mc;
3716 }
3717
aa755b35
MA
3718 /*
3719 * Try allocating the block in the usual metaslab class.
3720 * If that's full, allocate it in the normal class.
3721 * If that's full, allocate as a gang block,
3722 * and if all are full, the allocation fails (which shouldn't happen).
3723 *
3724 * Note that we do not fall back on embedded slog (ZIL) space, to
3725 * preserve unfragmented slog space, which is critical for decent
3726 * sync write performance. If a log allocation fails, we will fall
3727 * back to spa_sync() which is abysmal for performance.
3728 */
3bd4df38 3729 ASSERT(ZIO_HAS_ALLOCATOR(zio));
b128c09f 3730 error = metaslab_alloc(spa, mc, zio->io_size, bp,
4e21fd06 3731 zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
492f64e9 3732 &zio->io_alloc_list, zio, zio->io_allocator);
34dc7c2f 3733
cc99f275
DB
3734 /*
3735 * Fallback to normal class when an alloc class is full
3736 */
3737 if (error == ENOSPC && mc != spa_normal_class(spa)) {
3738 /*
3739 * If throttling, transfer reservation over to normal class.
3740 * The io_allocator slot can remain the same even though we
3741 * are switching classes.
3742 */
3743 if (mc->mc_alloc_throttle_enabled &&
3744 (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
3745 metaslab_class_throttle_unreserve(mc,
3746 zio->io_prop.zp_copies, zio->io_allocator, zio);
3747 zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
3748
aa755b35
MA
3749 VERIFY(metaslab_class_throttle_reserve(
3750 spa_normal_class(spa),
cc99f275
DB
3751 zio->io_prop.zp_copies, zio->io_allocator, zio,
3752 flags | METASLAB_MUST_RESERVE));
cc99f275 3753 }
aa755b35
MA
3754 zio->io_metaslab_class = mc = spa_normal_class(spa);
3755 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
3756 zfs_dbgmsg("%s: metaslab allocation failure, "
3757 "trying normal class: zio %px, size %llu, error %d",
8e739b2c
RE
3758 spa_name(spa), zio, (u_longlong_t)zio->io_size,
3759 error);
aa755b35 3760 }
cc99f275
DB
3761
3762 error = metaslab_alloc(spa, mc, zio->io_size, bp,
3763 zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
3764 &zio->io_alloc_list, zio, zio->io_allocator);
3765 }
3766
aa755b35
MA
3767 if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
3768 if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
3769 zfs_dbgmsg("%s: metaslab allocation failure, "
3770 "trying ganging: zio %px, size %llu, error %d",
8e739b2c
RE
3771 spa_name(spa), zio, (u_longlong_t)zio->io_size,
3772 error);
aa755b35
MA
3773 }
3774 return (zio_write_gang_block(zio, mc));
3775 }
3dfb57a3 3776 if (error != 0) {
aa755b35
MA
3777 if (error != ENOSPC ||
3778 (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
3779 zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
3780 "size %llu, error %d",
8e739b2c
RE
3781 spa_name(spa), zio, (u_longlong_t)zio->io_size,
3782 error);
aa755b35 3783 }
34dc7c2f
BB
3784 zio->io_error = error;
3785 }
3786
62840030 3787 return (zio);
34dc7c2f
BB
3788}
3789
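A sketch of the fallback cascade zio_dva_allocate() walks when a class fills up: preferred class, then the normal class, then ganging. alloc_from() is a hypothetical stand-in for metaslab_alloc() and always reports ENOSPC here so the cascade is visible:

#include <errno.h>

enum alloc_class { CLASS_PREFERRED, CLASS_NORMAL };

/* hypothetical stand-in for metaslab_alloc(); always reports "class full" */
static int
alloc_from(enum alloc_class c, unsigned long size)
{
	(void) c;
	(void) size;
	return (ENOSPC);
}

static int
allocate_with_fallback(unsigned long size, int *need_gang)
{
	int error = alloc_from(CLASS_PREFERRED, size);

	if (error == ENOSPC)
		error = alloc_from(CLASS_NORMAL, size);	/* class was full */
	if (error == ENOSPC && size > 512) {
		*need_gang = 1;		/* split into a gang block instead */
		return (0);
	}
	return (error);
}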
62840030 3790static zio_t *
34dc7c2f
BB
3791zio_dva_free(zio_t *zio)
3792{
b128c09f 3793 metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
34dc7c2f 3794
62840030 3795 return (zio);
34dc7c2f
BB
3796}
3797
62840030 3798static zio_t *
34dc7c2f
BB
3799zio_dva_claim(zio_t *zio)
3800{
b128c09f
BB
3801 int error;
3802
3803 error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
3804 if (error)
3805 zio->io_error = error;
34dc7c2f 3806
62840030 3807 return (zio);
34dc7c2f
BB
3808}
3809
b128c09f
BB
3810/*
3811 * Undo an allocation. This is used by zio_done() when an I/O fails
3812 * and we want to give back the block we just allocated.
3813 * This handles both normal blocks and gang blocks.
3814 */
3815static void
3816zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
3817{
493fcce9 3818 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp));
428870ff 3819 ASSERT(zio->io_bp_override == NULL);
b128c09f 3820
493fcce9
GW
3821 if (!BP_IS_HOLE(bp)) {
3822 metaslab_free(zio->io_spa, bp, BP_GET_LOGICAL_BIRTH(bp),
3823 B_TRUE);
3824 }
b128c09f
BB
3825
3826 if (gn != NULL) {
1c27024e 3827 for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
b128c09f
BB
3828 zio_dva_unallocate(zio, gn->gn_child[g],
3829 &gn->gn_gbh->zg_blkptr[g]);
3830 }
3831 }
3832}
3833
3834/*
3835 * Try to allocate an intent log block. Return 0 on success, errno on failure.
3836 */
3837int
b5256303
TC
3838zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
3839 uint64_t size, boolean_t *slog)
b128c09f 3840{
428870ff 3841 int error = 1;
4e21fd06 3842 zio_alloc_list_t io_alloc_list;
b128c09f 3843
428870ff
BB
3844 ASSERT(txg > spa_syncing_txg(spa));
3845
4e21fd06 3846 metaslab_trace_init(&io_alloc_list);
cc99f275
DB
3847
3848 /*
3849 * Block pointer fields are useful to metaslabs for stats and debugging.
3850 * Fill in the obvious ones before calling into metaslab_alloc().
3851 */
3852 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
3853 BP_SET_PSIZE(new_bp, size);
3854 BP_SET_LEVEL(new_bp, 0);
3855
492f64e9
PD
3856 /*
3857 * When allocating a zil block, we don't have information about
3858 * the final destination of the block except the objset it's part
3859 * of, so we just hash the objset ID to pick the allocator to get
3860 * some parallelism.
3861 */
b22bab25 3862 int flags = METASLAB_ZIL;
1b50749c
AM
3863 int allocator = (uint_t)cityhash4(0, 0, 0,
3864 os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
aa755b35
MA
3865 error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
3866 txg, NULL, flags, &io_alloc_list, NULL, allocator);
3867 *slog = (error == 0);
3868 if (error != 0) {
3869 error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
3870 new_bp, 1, txg, NULL, flags,
3871 &io_alloc_list, NULL, allocator);
3872 }
3873 if (error != 0) {
3874 error = metaslab_alloc(spa, spa_normal_class(spa), size,
3875 new_bp, 1, txg, NULL, flags,
3876 &io_alloc_list, NULL, allocator);
ebf8e3a2 3877 }
4e21fd06 3878 metaslab_trace_fini(&io_alloc_list);
b128c09f
BB
3879
3880 if (error == 0) {
3881 BP_SET_LSIZE(new_bp, size);
3882 BP_SET_PSIZE(new_bp, size);
3883 BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
428870ff
BB
3884 BP_SET_CHECKSUM(new_bp,
3885 spa_version(spa) >= SPA_VERSION_SLIM_ZIL
3886 ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
b128c09f
BB
3887 BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
3888 BP_SET_LEVEL(new_bp, 0);
428870ff 3889 BP_SET_DEDUP(new_bp, 0);
b128c09f 3890 BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
b5256303
TC
3891
3892 /*
3893 * encrypted blocks will require an IV and salt. We generate
3894 * these now since we will not be rewriting the bp at
3895 * rewrite time.
3896 */
3897 if (os->os_encrypted) {
3898 uint8_t iv[ZIO_DATA_IV_LEN];
3899 uint8_t salt[ZIO_DATA_SALT_LEN];
3900
3901 BP_SET_CRYPT(new_bp, B_TRUE);
3902 VERIFY0(spa_crypt_get_salt(spa,
3903 dmu_objset_id(os), salt));
3904 VERIFY0(zio_crypt_generate_iv(iv));
3905
3906 zio_crypt_encode_params_bp(new_bp, salt, iv);
3907 }
1ce23dca
PS
3908 } else {
3909 zfs_dbgmsg("%s: zil block allocation failure: "
8e739b2c
RE
3910 "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
3911 error);
b128c09f
BB
3912 }
3913
3914 return (error);
3915}
3916
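A sketch of the allocator selection used for ZIL blocks above: hash the objset id and reduce it modulo the allocator count. mix64() is a trivial stand-in for cityhash4(0, 0, 0, objset); only the "hash % spa_alloc_count" shape is the point:

#include <stdint.h>
#include <stdio.h>

static uint64_t
mix64(uint64_t x)	/* hypothetical stand-in for cityhash4() */
{
	x ^= x >> 33;
	x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33;
	return (x);
}

int
main(void)
{
	unsigned spa_alloc_count = 4;	/* number of allocators, illustrative */

	for (uint64_t objset = 1; objset <= 5; objset++)
		printf("objset %llu -> allocator %u\n",
		    (unsigned long long)objset,
		    (unsigned)(mix64(objset) % spa_alloc_count));
	return (0);
}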
34dc7c2f
BB
3917/*
3918 * ==========================================================================
3919 * Read and write to physical devices
3920 * ==========================================================================
3921 */
98b25418 3922
98b25418
GW
3923/*
3924 * Issue an I/O to the underlying vdev. Typically the issue pipeline
3925 * stops after this stage and will resume upon I/O completion.
3926 * However, there are instances where the vdev layer may need to
3927 * continue the pipeline when an I/O was not issued. Since the I/O
3928 * that was sent to the vdev layer might be different than the one
3929 * currently active in the pipeline (see vdev_queue_io()), we explicitly
3930 * force the underlying vdev layers to call either zio_execute() or
3931 * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
3932 */
62840030 3933static zio_t *
34dc7c2f
BB
3934zio_vdev_io_start(zio_t *zio)
3935{
3936 vdev_t *vd = zio->io_vd;
34dc7c2f
BB
3937 uint64_t align;
3938 spa_t *spa = zio->io_spa;
3939
193a37cb
TH
3940 zio->io_delay = 0;
3941
b128c09f
BB
3942 ASSERT(zio->io_error == 0);
3943 ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
34dc7c2f 3944
b128c09f
BB
3945 if (vd == NULL) {
3946 if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
3947 spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
34dc7c2f 3948
b128c09f
BB
3949 /*
3950 * The mirror_ops handle multiple DVAs in a single BP.
3951 */
98b25418 3952 vdev_mirror_ops.vdev_op_io_start(zio);
62840030 3953 return (NULL);
34dc7c2f
BB
3954 }
3955
3dfb57a3 3956 ASSERT3P(zio->io_logical, !=, zio);
6cb8e530
PZ
3957 if (zio->io_type == ZIO_TYPE_WRITE) {
3958 ASSERT(spa->spa_trust_config);
3959
a1d477c2
MA
3960 /*
3961 * Note: the code can handle other kinds of writes,
3962 * but we don't expect them.
3963 */
2a673e76 3964 if (zio->io_vd->vdev_noalloc) {
6cb8e530
PZ
3965 ASSERT(zio->io_flags &
3966 (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
3967 ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
3968 }
a1d477c2 3969 }
3dfb57a3 3970
b128c09f
BB
3971 align = 1ULL << vd->vdev_top->vdev_ashift;
3972
b02fe35d
AR
3973 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
3974 P2PHASE(zio->io_size, align) != 0) {
3975 /* Transform logical writes to be a full physical block size. */
34dc7c2f 3976 uint64_t asize = P2ROUNDUP(zio->io_size, align);
a6255b7f 3977 abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
178e73b3 3978 ASSERT(vd == vd->vdev_top);
34dc7c2f 3979 if (zio->io_type == ZIO_TYPE_WRITE) {
a6255b7f
DQ
3980 abd_copy(abuf, zio->io_abd, zio->io_size);
3981 abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
34dc7c2f 3982 }
b128c09f 3983 zio_push_transform(zio, abuf, asize, asize, zio_subblock);
34dc7c2f
BB
3984 }
3985
b02fe35d
AR
3986 /*
3987 * If this is not a physical io, make sure that it is properly aligned
3988 * before proceeding.
3989 */
3990 if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
3991 ASSERT0(P2PHASE(zio->io_offset, align));
3992 ASSERT0(P2PHASE(zio->io_size, align));
3993 } else {
3994 /*
3995 * For physical writes, we allow 512b aligned writes and assume
3996 * the device will perform a read-modify-write as necessary.
3997 */
3998 ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
3999 ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
4000 }
4001
572e2857 4002 VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
fb5f0bc8
BB
4003
4004 /*
4005 * If this is a repair I/O, and there's no self-healing involved --
4006 * that is, we're just resilvering what we expect to resilver --
4007 * then don't do the I/O unless zio's txg is actually in vd's DTL.
9e052db4
MA
4008 * This prevents spurious resilvering.
4009 *
4010 * There are a few ways that we can end up creating these spurious
4011 * resilver i/os:
4012 *
4013 * 1. A resilver i/o will be issued if any DVA in the BP has a
4014 * dirty DTL. The mirror code will issue resilver writes to
4015 * each DVA, including the one(s) that are not on vdevs with dirty
4016 * DTLs.
4017 *
4018 * 2. With nested replication, which happens when we have a
4019 * "replacing" or "spare" vdev that's a child of a mirror or raidz.
4020 * For example, given mirror(replacing(A+B), C), it's likely that
4021 * only A is out of date (it's the new device). In this case, we'll
4022 * read from C, then use the data to resilver A+B -- but we don't
4023 * actually want to resilver B, just A. The top-level mirror has no
4024 * way to know this, so instead we just discard unnecessary repairs
4025 * as we work our way down the vdev tree.
4026 *
4027 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
4028 * The same logic applies to any form of nested replication: ditto
4029 * + mirror, RAID-Z + replacing, etc.
4030 *
4031 * However, indirect vdevs point off to other vdevs which may have
4032 * DTL's, so we never bypass them. The child i/os on concrete vdevs
4033 * will be properly bypassed instead.
b2255edc
BB
4034 *
4035 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from
4036 * a dRAID spare vdev. For example, when a dRAID spare is first
4037 * used, its spare blocks need to be written to but the leaf vdev's
4038 * of such blocks can have empty DTL_PARTIAL.
4039 *
4040 * There seemed no clean way to allow such writes while bypassing
4041 * spurious ones. At this point, just avoid all bypassing for dRAID
4042 * for correctness.
fb5f0bc8
BB
4043 */
4044 if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
4045 !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
4046 zio->io_txg != 0 && /* not a delegated i/o */
9e052db4 4047 vd->vdev_ops != &vdev_indirect_ops &&
b2255edc 4048 vd->vdev_top->vdev_ops != &vdev_draid_ops &&
fb5f0bc8
BB
4049 !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
4050 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
fb5f0bc8 4051 zio_vdev_io_bypass(zio);
62840030 4052 return (zio);
fb5f0bc8 4053 }
34dc7c2f 4054
b2255edc
BB
4055 /*
4056 * Select the next best leaf I/O to process. Distributed spares are
4057 * excluded since they dispatch the I/O directly to a leaf vdev after
4058 * applying the dRAID mapping.
4059 */
4060 if (vd->vdev_ops->vdev_op_leaf &&
4061 vd->vdev_ops != &vdev_draid_spare_ops &&
4062 (zio->io_type == ZIO_TYPE_READ ||
4063 zio->io_type == ZIO_TYPE_WRITE ||
4064 zio->io_type == ZIO_TYPE_TRIM)) {
b128c09f 4065
4725e543
RN
4066 if (zio_handle_device_injection(vd, zio, ENOSYS) != 0) {
4067 /*
4068 * "no-op" injections return success, but do no actual
4069 * work. Just skip the remaining vdev stages.
4070 */
4071 zio_vdev_io_bypass(zio);
4072 zio_interrupt(zio);
4073 return (NULL);
4074 }
4075
b128c09f 4076 if ((zio = vdev_queue_io(zio)) == NULL)
62840030 4077 return (NULL);
b128c09f
BB
4078
4079 if (!vdev_accessible(vd, zio)) {
2e528b49 4080 zio->io_error = SET_ERROR(ENXIO);
b128c09f 4081 zio_interrupt(zio);
62840030 4082 return (NULL);
b128c09f 4083 }
67103816 4084 zio->io_delay = gethrtime();
b128c09f
BB
4085 }
4086
98b25418 4087 vd->vdev_ops->vdev_op_io_start(zio);
62840030 4088 return (NULL);
34dc7c2f
BB
4089}
4090
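A worked example of the alignment transform above: with ashift=12 the allocation unit is 4 KiB, so a 6 KiB logical write is padded to an 8 KiB aligned buffer and the 2 KiB tail is zero-filled before being pushed down to the vdev:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t ashift = 12;			/* top-level vdev ashift */
	uint64_t align = 1ULL << ashift;	/* 4096 bytes */
	uint64_t io_size = 6 * 1024;		/* logical write size */

	/* same result as P2ROUNDUP(io_size, align) for power-of-two align */
	uint64_t asize = (io_size + align - 1) & ~(align - 1);

	printf("io_size=%llu asize=%llu zero-fill=%llu\n",
	    (unsigned long long)io_size, (unsigned long long)asize,
	    (unsigned long long)(asize - io_size));	/* 6144 8192 2048 */
	return (0);
}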
62840030 4091static zio_t *
34dc7c2f
BB
4092zio_vdev_io_done(zio_t *zio)
4093{
b128c09f
BB
4094 vdev_t *vd = zio->io_vd;
4095 vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
4096 boolean_t unexpected_error = B_FALSE;
34dc7c2f 4097
ddc751d5 4098 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
62840030 4099 return (NULL);
ddc751d5 4100 }
34dc7c2f 4101
1b939560 4102 ASSERT(zio->io_type == ZIO_TYPE_READ ||
76d1dde9 4103 zio->io_type == ZIO_TYPE_WRITE ||
d7605ae7 4104 zio->io_type == ZIO_TYPE_FLUSH ||
76d1dde9 4105 zio->io_type == ZIO_TYPE_TRIM);
b128c09f 4106
193a37cb
TH
4107 if (zio->io_delay)
4108 zio->io_delay = gethrtime() - zio->io_delay;
4109
b2255edc
BB
4110 if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4111 vd->vdev_ops != &vdev_draid_spare_ops) {
d7605ae7 4112 if (zio->io_type != ZIO_TYPE_FLUSH)
76d1dde9 4113 vdev_queue_io_done(zio);
b128c09f 4114
b128c09f 4115 if (zio_injection_enabled && zio->io_error == 0)
d977122d
DB
4116 zio->io_error = zio_handle_device_injections(vd, zio,
4117 EIO, EILSEQ);
b128c09f
BB
4118
4119 if (zio_injection_enabled && zio->io_error == 0)
4120 zio->io_error = zio_handle_label_injection(zio, EIO);
4121
9f83eec0
AM
4122 if (zio->io_error && zio->io_type != ZIO_TYPE_FLUSH &&
4123 zio->io_type != ZIO_TYPE_TRIM) {
b128c09f 4124 if (!vdev_accessible(vd, zio)) {
2e528b49 4125 zio->io_error = SET_ERROR(ENXIO);
b128c09f
BB
4126 } else {
4127 unexpected_error = B_TRUE;
4128 }
4129 }
4130 }
4131
4132 ops->vdev_op_io_done(zio);
34dc7c2f 4133
55c12724 4134 if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
d164b209 4135 VERIFY(vdev_probe(vd, zio) == NULL);
34dc7c2f 4136
62840030 4137 return (zio);
34dc7c2f
BB
4138}
4139
a8b2e306
TC
4140/*
4141 * This function is used to change the priority of an existing zio that is
4142 * currently in-flight. This is used by the arc to upgrade priority in the
4143 * event that a demand read is made for a block that is currently queued
4144 * as a scrub or async read IO. Otherwise, the high priority read request
4145 * would end up having to wait for the lower priority IO.
4146 */
4147void
4148zio_change_priority(zio_t *pio, zio_priority_t priority)
4149{
4150 zio_t *cio, *cio_next;
4151 zio_link_t *zl = NULL;
4152
4153 ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
4154
4155 if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
4156 vdev_queue_change_io_priority(pio, priority);
4157 } else {
4158 pio->io_priority = priority;
4159 }
4160
4161 mutex_enter(&pio->io_lock);
4162 for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
4163 cio_next = zio_walk_children(pio, &zl);
4164 zio_change_priority(cio, priority);
4165 }
4166 mutex_exit(&pio->io_lock);
4167}
4168
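A toy sketch of the recursive priority upgrade zio_change_priority() performs over the child tree; fake_io is a hypothetical stand-in for zio_t and the leaf re-queue step is reduced to a comment:

struct fake_io {
	int		priority;
	struct fake_io	*children[4];
	int		nchildren;
};

static void
change_priority(struct fake_io *io, int priority)
{
	io->priority = priority;	/* a leaf's vdev queue is re-sorted here */
	for (int i = 0; i < io->nchildren; i++)
		change_priority(io->children[i], priority);
}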
428870ff
BB
4169/*
4170 * For non-raidz ZIOs, we can just copy aside the bad data read from the
4171 * disk, and use that to finish the checksum ereport later.
4172 */
4173static void
4174zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
84c07ada 4175 const abd_t *good_buf)
428870ff
BB
4176{
4177 /* no processing needed */
4178 zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
4179}
4180
428870ff 4181void
330c6c05 4182zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
428870ff 4183{
84c07ada 4184 void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
428870ff 4185
84c07ada 4186 abd_copy(abd, zio->io_abd, zio->io_size);
428870ff
BB
4187
4188 zcr->zcr_cbinfo = zio->io_size;
84c07ada 4189 zcr->zcr_cbdata = abd;
428870ff 4190 zcr->zcr_finish = zio_vsd_default_cksum_finish;
84c07ada 4191 zcr->zcr_free = zio_abd_free;
428870ff
BB
4192}
4193
62840030 4194static zio_t *
34dc7c2f
BB
4195zio_vdev_io_assess(zio_t *zio)
4196{
4197 vdev_t *vd = zio->io_vd;
b128c09f 4198
ddc751d5 4199 if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
62840030 4200 return (NULL);
ddc751d5 4201 }
b128c09f
BB
4202
4203 if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
4204 spa_config_exit(zio->io_spa, SCL_ZIO, zio);
4205
4206 if (zio->io_vsd != NULL) {
428870ff 4207 zio->io_vsd_ops->vsd_free(zio);
b128c09f 4208 zio->io_vsd = NULL;
34dc7c2f
BB
4209 }
4210
b128c09f 4211 if (zio_injection_enabled && zio->io_error == 0)
34dc7c2f
BB
4212 zio->io_error = zio_handle_fault_injection(zio, EIO);
4213
4214 /*
4215 * If the I/O failed, determine whether we should attempt to retry it.
428870ff
BB
4216 *
4217 * On retry, we cut in line in the issue queue, since we don't want
4218 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
34dc7c2f 4219 */
b128c09f
BB
4220 if (zio->io_error && vd == NULL &&
4221 !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
4222 ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
4223 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
34dc7c2f 4224 zio->io_error = 0;
70ea484e 4225 zio->io_flags |= ZIO_FLAG_IO_RETRY | ZIO_FLAG_DONT_AGGREGATE;
428870ff
BB
4226 zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
4227 zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
4228 zio_requeue_io_start_cut_in_line);
62840030 4229 return (NULL);
34dc7c2f
BB
4230 }
4231
b128c09f
BB
4232 /*
4233 * If we got an error on a leaf device, convert it to ENXIO
4234 * if the device is not accessible at all.
4235 */
4236 if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
4237 !vdev_accessible(vd, zio))
2e528b49 4238 zio->io_error = SET_ERROR(ENXIO);
b128c09f
BB
4239
4240 /*
4241 * If we can't write to an interior vdev (mirror or RAID-Z),
4242 * set vdev_cant_write so that we stop trying to allocate from it.
4243 */
4244 if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
13fe0198 4245 vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
2b56a634
MA
4246 vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
4247 "cant_write=TRUE due to write failure with ENXIO",
4248 zio);
b128c09f 4249 vd->vdev_cant_write = B_TRUE;
13fe0198 4250 }
b128c09f 4251
298ec40b
GM
4252 /*
4253 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future
1b939560
BB
4254 * attempts will ever succeed. In this case we set a persistent
4255 * boolean flag so that we don't bother with it in the future.
298ec40b
GM
4256 */
4257 if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
d7605ae7 4258 zio->io_type == ZIO_TYPE_FLUSH && vd != NULL)
298ec40b
GM
4259 vd->vdev_nowritecache = B_TRUE;
4260
b128c09f
BB
4261 if (zio->io_error)
4262 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
4263
62840030 4264 return (zio);
34dc7c2f
BB
4265}
4266
4267void
4268zio_vdev_io_reissue(zio_t *zio)
4269{
4270 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4271 ASSERT(zio->io_error == 0);
4272
428870ff 4273 zio->io_stage >>= 1;
34dc7c2f
BB
4274}
4275
4276void
4277zio_vdev_io_redone(zio_t *zio)
4278{
4279 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
4280
428870ff 4281 zio->io_stage >>= 1;
34dc7c2f
BB
4282}
4283
4284void
4285zio_vdev_io_bypass(zio_t *zio)
4286{
4287 ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
4288 ASSERT(zio->io_error == 0);
4289
4290 zio->io_flags |= ZIO_FLAG_IO_BYPASS;
428870ff 4291 zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
34dc7c2f
BB
4292}
4293
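A sketch of the "stage >> 1" rewind trick used by zio_vdev_io_reissue(), zio_vdev_io_redone() and zio_vdev_io_bypass(): pipeline stages are one-hot bits, so parking io_stage one bit to the right of the target stage makes the next zio_execute() pass advance straight into it. The stage values and three-stage pipeline below are illustrative, not the kernel's zio_stage enum:

#include <stdint.h>
#include <stdio.h>

#define	STAGE_OPEN		(1u << 0)
#define	STAGE_VDEV_IO_START	(1u << 5)
#define	STAGE_VDEV_IO_DONE	(1u << 6)

int
main(void)
{
	uint32_t pipeline = STAGE_OPEN | STAGE_VDEV_IO_START |
	    STAGE_VDEV_IO_DONE;
	uint32_t stage = STAGE_VDEV_IO_START >> 1;	/* rewound */

	/* mimic zio_execute(): advance to the next enabled stage bit */
	do {
		stage <<= 1;
	} while ((stage & pipeline) == 0);

	printf("next stage executed: %#x\n", stage);	/* VDEV_IO_START */
	return (0);
}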
b5256303
TC
4294/*
4295 * ==========================================================================
4296 * Encrypt and store encryption parameters
4297 * ==========================================================================
4298 */
4299
4300
4301/*
4302 * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
4303 * managing the storage of encryption parameters and passing them to the
4304 * lower-level encryption functions.
4305 */
62840030 4306static zio_t *
b5256303
TC
4307zio_encrypt(zio_t *zio)
4308{
4309 zio_prop_t *zp = &zio->io_prop;
4310 spa_t *spa = zio->io_spa;
4311 blkptr_t *bp = zio->io_bp;
4312 uint64_t psize = BP_GET_PSIZE(bp);
ae76f45c 4313 uint64_t dsobj = zio->io_bookmark.zb_objset;
b5256303
TC
4314 dmu_object_type_t ot = BP_GET_TYPE(bp);
4315 void *enc_buf = NULL;
4316 abd_t *eabd = NULL;
4317 uint8_t salt[ZIO_DATA_SALT_LEN];
4318 uint8_t iv[ZIO_DATA_IV_LEN];
4319 uint8_t mac[ZIO_DATA_MAC_LEN];
4320 boolean_t no_crypt = B_FALSE;
4321
4322 /* the root zio already encrypted the data */
4323 if (zio->io_child_type == ZIO_CHILD_GANG)
62840030 4324 return (zio);
b5256303
TC
4325
4326 /* only ZIL blocks are re-encrypted on rewrite */
4327 if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
62840030 4328 return (zio);
b5256303
TC
4329
4330 if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
4331 BP_SET_CRYPT(bp, B_FALSE);
62840030 4332 return (zio);
b5256303
TC
4333 }
4334
4335 /* if we are doing raw encryption set the provided encryption params */
4336 if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
ae76f45c 4337 ASSERT0(BP_GET_LEVEL(bp));
b5256303
TC
4338 BP_SET_CRYPT(bp, B_TRUE);
4339 BP_SET_BYTEORDER(bp, zp->zp_byteorder);
4340 if (ot != DMU_OT_OBJSET)
4341 zio_crypt_encode_mac_bp(bp, zp->zp_mac);
ae76f45c
TC
4342
4343 /* dnode blocks must be written out in the provided byteorder */
4344 if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
4345 ot == DMU_OT_DNODE) {
4346 void *bswap_buf = zio_buf_alloc(psize);
4347 abd_t *babd = abd_get_from_buf(bswap_buf, psize);
4348
4349 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4350 abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
4351 dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
4352 psize);
4353
4354 abd_take_ownership_of_buf(babd, B_TRUE);
4355 zio_push_transform(zio, babd, psize, psize, NULL);
4356 }
4357
b5256303
TC
4358 if (DMU_OT_IS_ENCRYPTED(ot))
4359 zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
62840030 4360 return (zio);
b5256303
TC
4361 }
4362
4363 /* indirect blocks only maintain a cksum of the lower level MACs */
4364 if (BP_GET_LEVEL(bp) > 0) {
4365 BP_SET_CRYPT(bp, B_TRUE);
4366 VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
4367 zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
4368 mac));
4369 zio_crypt_encode_mac_bp(bp, mac);
62840030 4370 return (zio);
b5256303
TC
4371 }
4372
4373 /*
4374 * Objset blocks are a special case since they have 2 256-bit MACs
4375 * embedded within them.
4376 */
4377 if (ot == DMU_OT_OBJSET) {
4378 ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
4379 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
4380 BP_SET_CRYPT(bp, B_TRUE);
ae76f45c
TC
4381 VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
4382 zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
62840030 4383 return (zio);
b5256303
TC
4384 }
4385
4386 /* unencrypted object types are only authenticated with a MAC */
4387 if (!DMU_OT_IS_ENCRYPTED(ot)) {
4388 BP_SET_CRYPT(bp, B_TRUE);
ae76f45c
TC
4389 VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
4390 zio->io_abd, psize, mac));
b5256303 4391 zio_crypt_encode_mac_bp(bp, mac);
62840030 4392 return (zio);
b5256303
TC
4393 }
4394
4395 /*
4396 * Later passes of sync-to-convergence may decide to rewrite data
4397 * in place to avoid more disk reallocations. This presents a problem
d611989f 4398 * for encryption because this constitutes rewriting the new data with
b5256303
TC
4399 * the same encryption key and IV. However, this only applies to blocks
4400 * in the MOS (particularly the spacemaps) and we do not encrypt the
4401 * MOS. We assert that the zio is allocating or an intent log write
4402 * to enforce this.
4403 */
4404 ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
4405 ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
4406 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
4407 ASSERT3U(psize, !=, 0);
4408
4409 enc_buf = zio_buf_alloc(psize);
4410 eabd = abd_get_from_buf(enc_buf, psize);
4411 abd_take_ownership_of_buf(eabd, B_TRUE);
4412
4413 /*
4414 * For an explanation of what encryption parameters are stored
4415 * where, see the block comment in zio_crypt.c.
4416 */
4417 if (ot == DMU_OT_INTENT_LOG) {
4418 zio_crypt_decode_params_bp(bp, salt, iv);
4419 } else {
4420 BP_SET_CRYPT(bp, B_TRUE);
4421 }
4422
4423 /* Perform the encryption. This should not fail */
be9a5c35
TC
4424 VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
4425 BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
4426 salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
b5256303
TC
4427
4428 /* encode encryption metadata into the bp */
4429 if (ot == DMU_OT_INTENT_LOG) {
4430 /*
4431 * ZIL blocks store the MAC in the embedded checksum, so the
4432 * transform must always be applied.
4433 */
4434 zio_crypt_encode_mac_zil(enc_buf, mac);
4435 zio_push_transform(zio, eabd, psize, psize, NULL);
4436 } else {
4437 BP_SET_CRYPT(bp, B_TRUE);
4438 zio_crypt_encode_params_bp(bp, salt, iv);
4439 zio_crypt_encode_mac_bp(bp, mac);
4440
4441 if (no_crypt) {
4442 ASSERT3U(ot, ==, DMU_OT_DNODE);
4443 abd_free(eabd);
4444 } else {
4445 zio_push_transform(zio, eabd, psize, psize, NULL);
4446 }
4447 }
4448
62840030 4449 return (zio);
b5256303
TC
4450}
4451
34dc7c2f
BB
4452/*
4453 * ==========================================================================
4454 * Generate and verify checksums
4455 * ==========================================================================
4456 */
62840030 4457static zio_t *
34dc7c2f
BB
4458zio_checksum_generate(zio_t *zio)
4459{
34dc7c2f 4460 blkptr_t *bp = zio->io_bp;
b128c09f 4461 enum zio_checksum checksum;
34dc7c2f 4462
b128c09f
BB
4463 if (bp == NULL) {
4464 /*
4465 * This is zio_write_phys().
4466 * We're either generating a label checksum, or none at all.
4467 */
4468 checksum = zio->io_prop.zp_checksum;
34dc7c2f 4469
b128c09f 4470 if (checksum == ZIO_CHECKSUM_OFF)
62840030 4471 return (zio);
b128c09f
BB
4472
4473 ASSERT(checksum == ZIO_CHECKSUM_LABEL);
4474 } else {
4475 if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
4476 ASSERT(!IO_IS_ALLOCATING(zio));
4477 checksum = ZIO_CHECKSUM_GANG_HEADER;
4478 } else {
4479 checksum = BP_GET_CHECKSUM(bp);
4480 }
4481 }
34dc7c2f 4482
a6255b7f 4483 zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
34dc7c2f 4484
62840030 4485 return (zio);
34dc7c2f
BB
4486}
4487
62840030 4488static zio_t *
b128c09f 4489zio_checksum_verify(zio_t *zio)
34dc7c2f 4490{
428870ff 4491 zio_bad_cksum_t info;
b128c09f
BB
4492 blkptr_t *bp = zio->io_bp;
4493 int error;
34dc7c2f 4494
428870ff
BB
4495 ASSERT(zio->io_vd != NULL);
4496
b128c09f
BB
4497 if (bp == NULL) {
4498 /*
4499 * This is zio_read_phys().
4500 * We're either verifying a label checksum, or nothing at all.
4501 */
4502 if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
62840030 4503 return (zio);
34dc7c2f 4504
b2255edc 4505 ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
b128c09f 4506 }
34dc7c2f 4507
428870ff 4508 if ((error = zio_checksum_error(zio, &info)) != 0) {
b128c09f 4509 zio->io_error = error;
7a3066ff
MA
4510 if (error == ECKSUM &&
4511 !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
03e02e5b
DB
4512 mutex_enter(&zio->io_vd->vdev_stat_lock);
4513 zio->io_vd->vdev_stat.vs_checksum_errors++;
4514 mutex_exit(&zio->io_vd->vdev_stat_lock);
7a75f74c
RW
4515 (void) zfs_ereport_start_checksum(zio->io_spa,
4516 zio->io_vd, &zio->io_bookmark, zio,
4517 zio->io_offset, zio->io_size, &info);
b128c09f 4518 }
34dc7c2f
BB
4519 }
4520
62840030 4521 return (zio);
34dc7c2f
BB
4522}
4523
4524/*
4525 * Called by RAID-Z to ensure we don't compute the checksum twice.
4526 */
4527void
4528zio_checksum_verified(zio_t *zio)
4529{
428870ff 4530 zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
34dc7c2f
BB
4531}
4532
4533/*
b128c09f
BB
4534 * ==========================================================================
4535 * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
9b67f605 4536 * An error of 0 indicates success. ENXIO indicates whole-device failure,
d611989f 4537 * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
b128c09f
BB
4538 * indicate errors that are specific to one I/O, and most likely permanent.
4539 * Any other error is presumed to be worse because we weren't expecting it.
4540 * ==========================================================================
34dc7c2f 4541 */
b128c09f
BB
4542int
4543zio_worst_error(int e1, int e2)
34dc7c2f 4544{
b128c09f
BB
4545 static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
4546 int r1, r2;
4547
4548 for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
4549 if (e1 == zio_error_rank[r1])
4550 break;
34dc7c2f 4551
b128c09f
BB
4552 for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
4553 if (e2 == zio_error_rank[r2])
4554 break;
4555
4556 return (r1 > r2 ? e1 : e2);
34dc7c2f
BB
4557}
4558
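A usage sketch for zio_worst_error(), assuming it is linked against the definition above: 0 is best, ENXIO then ECKSUM then EIO get progressively worse, and any errno outside the table is treated as worst of all. Only standard errnos are used here so the snippet builds outside the ZFS headers:

#include <errno.h>
#include <assert.h>

int zio_worst_error(int, int);	/* as defined above */

int
main(void)
{
	assert(zio_worst_error(0, ENXIO) == ENXIO);	/* any error beats 0 */
	assert(zio_worst_error(ENXIO, EIO) == EIO);	/* EIO outranks ENXIO */
	assert(zio_worst_error(EIO, EINVAL) == EINVAL);	/* unranked is worst */
	return (0);
}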
4559/*
4560 * ==========================================================================
b128c09f 4561 * I/O completion
34dc7c2f
BB
4562 * ==========================================================================
4563 */
62840030 4564static zio_t *
b128c09f 4565zio_ready(zio_t *zio)
34dc7c2f 4566{
b128c09f 4567 blkptr_t *bp = zio->io_bp;
d164b209 4568 zio_t *pio, *pio_next;
3dfb57a3 4569 zio_link_t *zl = NULL;
34dc7c2f 4570
eda3fcd5
AM
4571 if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
4572 ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, ZIO_WAIT_READY)) {
62840030 4573 return (NULL);
ddc751d5 4574 }
34dc7c2f 4575
9babb374 4576 if (zio->io_ready) {
b128c09f 4577 ASSERT(IO_IS_ALLOCATING(zio));
493fcce9
GW
4578 ASSERT(BP_GET_LOGICAL_BIRTH(bp) == zio->io_txg ||
4579 BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
b128c09f 4580 ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
34dc7c2f 4581
b128c09f
BB
4582 zio->io_ready(zio);
4583 }
34dc7c2f 4584
b4a08730 4585#ifdef ZFS_DEBUG
b128c09f
BB
4586 if (bp != NULL && bp != &zio->io_bp_copy)
4587 zio->io_bp_copy = *bp;
b4a08730 4588#endif
34dc7c2f 4589
3dfb57a3 4590 if (zio->io_error != 0) {
b128c09f 4591 zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
34dc7c2f 4592
3dfb57a3
DB
4593 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
4594 ASSERT(IO_IS_ALLOCATING(zio));
4595 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
cc99f275 4596 ASSERT(zio->io_metaslab_class != NULL);
3bd4df38 4597 ASSERT(ZIO_HAS_ALLOCATOR(zio));
cc99f275 4598
3dfb57a3
DB
4599 /*
4600 * We were unable to allocate anything, unreserve and
4601 * issue the next I/O to allocate.
4602 */
4603 metaslab_class_throttle_unreserve(
cc99f275
DB
4604 zio->io_metaslab_class, zio->io_prop.zp_copies,
4605 zio->io_allocator, zio);
492f64e9 4606 zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
3dfb57a3
DB
4607 }
4608 }
4609
d164b209
BB
4610 mutex_enter(&zio->io_lock);
4611 zio->io_state[ZIO_WAIT_READY] = 1;
3dfb57a3 4612 pio = zio_walk_parents(zio, &zl);
d164b209
BB
4613 mutex_exit(&zio->io_lock);
4614
4615 /*
4616 * As we notify zio's parents, new parents could be added.
4617 * New parents go to the head of zio's io_parent_list, however,
4618 * so we will (correctly) not notify them. The remainder of zio's
4619 * io_parent_list, from 'pio_next' onward, cannot change because
4620 * all parents must wait for us to be done before they can be done.
4621 */
4622 for (; pio != NULL; pio = pio_next) {
3dfb57a3 4623 pio_next = zio_walk_parents(zio, &zl);
62840030 4624 zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
d164b209 4625 }
34dc7c2f 4626
428870ff 4627 if (zio->io_flags & ZIO_FLAG_NODATA) {
7cb67d62 4628 if (bp != NULL && BP_IS_GANG(bp)) {
428870ff
BB
4629 zio->io_flags &= ~ZIO_FLAG_NODATA;
4630 } else {
a6255b7f 4631 ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
428870ff
BB
4632 zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
4633 }
4634 }
4635
4636 if (zio_injection_enabled &&
4637 zio->io_spa->spa_syncing_txg == zio->io_txg)
4638 zio_handle_ignored_writes(zio);
4639
62840030 4640 return (zio);
34dc7c2f
BB
4641}
4642
3dfb57a3
DB
4643/*
4644 * Update the allocation throttle accounting.
4645 */
4646static void
4647zio_dva_throttle_done(zio_t *zio)
4648{
2a8ba608 4649 zio_t *lio __maybe_unused = zio->io_logical;
3dfb57a3
DB
4650 zio_t *pio = zio_unique_parent(zio);
4651 vdev_t *vd = zio->io_vd;
4652 int flags = METASLAB_ASYNC_ALLOC;
4653
4654 ASSERT3P(zio->io_bp, !=, NULL);
4655 ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
4656 ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
4657 ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
4658 ASSERT(vd != NULL);
4659 ASSERT3P(vd, ==, vd->vdev_top);
21df134f
SB
4660 ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
4661 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
3dfb57a3
DB
4662 ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
4663 ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
4664 ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
4665
4666 /*
4667 * Parents of gang children can have two flavors -- ones that
4668 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
4669 * and ones that allocated the constituent blocks. The allocation
4670 * throttle needs to know the allocating parent zio so we must find
4671 * it here.
4672 */
4673 if (pio->io_child_type == ZIO_CHILD_GANG) {
4674 /*
4675 * If our parent is a rewrite gang child then our grandparent
4676 * would have been the one that performed the allocation.
4677 */
4678 if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
4679 pio = zio_unique_parent(pio);
4680 flags |= METASLAB_GANG_CHILD;
4681 }
4682
4683 ASSERT(IO_IS_ALLOCATING(pio));
3bd4df38 4684 ASSERT(ZIO_HAS_ALLOCATOR(pio));
3dfb57a3
DB
4685 ASSERT3P(zio, !=, zio->io_logical);
4686 ASSERT(zio->io_logical != NULL);
4687 ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
4688 ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
cc99f275 4689 ASSERT(zio->io_metaslab_class != NULL);
3dfb57a3
DB
4690
4691 mutex_enter(&pio->io_lock);
492f64e9
PD
4692 metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
4693 pio->io_allocator, B_TRUE);
3dfb57a3
DB
4694 mutex_exit(&pio->io_lock);
4695
cc99f275
DB
4696 metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
4697 pio->io_allocator, pio);
3dfb57a3
DB
4698
4699 /*
4700 * Call into the pipeline to see if there is more work that
4701 * needs to be done. If there is work to be done it will be
4702 * dispatched to another taskq thread.
4703 */
492f64e9 4704 zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
3dfb57a3
DB
4705}
4706
62840030 4707static zio_t *
b128c09f 4708zio_done(zio_t *zio)
34dc7c2f 4709{
3dfb57a3
DB
4710 /*
4711 * Always attempt to keep stack usage minimal here since
d611989f 4712 * we can be called recursively up to 19 levels deep.
3dfb57a3 4713 */
84c07ada 4714 const uint64_t psize = zio->io_size;
d164b209 4715 zio_t *pio, *pio_next;
3dfb57a3 4716 zio_link_t *zl = NULL;
34dc7c2f 4717
b128c09f 4718 /*
9babb374 4719 * If our children haven't all completed,
b128c09f
BB
4720 * wait for them and then repeat this pipeline stage.
4721 */
ddc751d5 4722 if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
62840030 4723 return (NULL);
ddc751d5 4724 }
34dc7c2f 4725
3dfb57a3
DB
4726 /*
4727 * If the allocation throttle is enabled, then update the accounting.
4728 * We only track child I/Os that are part of an allocating async
4729 * write. We must do this since the allocation is performed
4730 * by the logical I/O but the actual write is done by child I/Os.
4731 */
4732 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
4733 zio->io_child_type == ZIO_CHILD_VDEV) {
cc99f275
DB
4734 ASSERT(zio->io_metaslab_class != NULL);
4735 ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
3dfb57a3
DB
4736 zio_dva_throttle_done(zio);
4737 }
4738
4739 /*
4740 * If the allocation throttle is enabled, verify that
4741 * we have decremented the refcounts for every I/O that was throttled.
4742 */
4743 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
4744 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4745 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
4746 ASSERT(zio->io_bp != NULL);
3bd4df38 4747 ASSERT(ZIO_HAS_ALLOCATOR(zio));
cc99f275 4748
492f64e9
PD
4749 metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
4750 zio->io_allocator);
f8020c93
AM
4751 VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
4752 mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
3dfb57a3
DB
4753 }
4754
4755
1c27024e
DB
4756 for (int c = 0; c < ZIO_CHILD_TYPES; c++)
4757 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
b128c09f
BB
4758 ASSERT(zio->io_children[c][w] == 0);
4759
9b67f605 4760 if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
c776b317
BB
4761 ASSERT(zio->io_bp->blk_pad[0] == 0);
4762 ASSERT(zio->io_bp->blk_pad[1] == 0);
861166b0 4763 ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
d1d7e268 4764 sizeof (blkptr_t)) == 0 ||
c776b317
BB
4765 (zio->io_bp == zio_unique_parent(zio)->io_bp));
4766 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
428870ff 4767 zio->io_bp_override == NULL &&
b128c09f 4768 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
d1d7e268
MK
4769 ASSERT3U(zio->io_prop.zp_copies, <=,
4770 BP_GET_NDVAS(zio->io_bp));
c776b317 4771 ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
d1d7e268
MK
4772 (BP_COUNT_GANG(zio->io_bp) ==
4773 BP_GET_NDVAS(zio->io_bp)));
b128c09f 4774 }
03c6040b
GW
4775 if (zio->io_flags & ZIO_FLAG_NOPWRITE)
4776 VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
b128c09f
BB
4777 }
4778
4779 /*
428870ff 4780 * If there were child vdev/gang/ddt errors, they apply to us now.
b128c09f
BB
4781 */
4782 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
4783 zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
428870ff
BB
4784 zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
4785
4786 /*
4787 * If the I/O on the transformed data was successful, generate any
4788 * checksum reports now while we still have the transformed data.
4789 */
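 /*
  * Worked example (illustrative numbers): with psize = 3584 and
  * zcr_align = 4096, asize = P2ROUNDUP(3584, 4096) = 4096, so a 4 KiB abd
  * is allocated, the 3584 transformed bytes are copied in, and the
  * trailing 512 bytes are zeroed before zcr_finish() sees the data.
  */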
4790 if (zio->io_error == 0) {
4791 while (zio->io_cksum_report != NULL) {
4792 zio_cksum_report_t *zcr = zio->io_cksum_report;
4793 uint64_t align = zcr->zcr_align;
a6255b7f 4794 uint64_t asize = P2ROUNDUP(psize, align);
a6255b7f
DQ
4795 abd_t *adata = zio->io_abd;
4796
f2286383 4797 if (adata != NULL && asize != psize) {
84c07ada 4798 adata = abd_alloc(asize, B_TRUE);
a6255b7f
DQ
4799 abd_copy(adata, zio->io_abd, psize);
4800 abd_zero_off(adata, psize, asize - psize);
428870ff
BB
4801 }
4802
4803 zio->io_cksum_report = zcr->zcr_next;
4804 zcr->zcr_next = NULL;
84c07ada 4805 zcr->zcr_finish(zcr, adata);
428870ff
BB
4806 zfs_ereport_free_checksum(zcr);
4807
f2286383 4808 if (adata != NULL && asize != psize)
a6255b7f 4809 abd_free(adata);
428870ff
BB
4810 }
4811 }
b128c09f
BB
4812
4813 zio_pop_transforms(zio); /* note: may set zio->io_error */
4814
a6255b7f 4815 vdev_stat_update(zio, psize);
b128c09f 4816
a69052be 4817 /*
cc92e9d0 4818 * If this I/O is attached to a particular vdev and is slow, exceeding
72f53c56
MJ
 4819 * zio_slow_io_ms milliseconds to complete, post an error describing the
 4820 * I/O delay. We ignore these errors if the device is currently unavailable.
a69052be 4821 */
ad796b8a
TH
4822 if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
4823 if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
4824 /*
4825 * We want to only increment our slow IO counters if
4826 * the IO is valid (i.e. not if the drive is removed).
4827 *
4828 * zfs_ereport_post() will also do these checks, but
4829 * it can also ratelimit and have other failures, so we
4830 * need to increment the slow_io counters independent
4831 * of it.
4832 */
4833 if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
4834 zio->io_spa, zio->io_vd, zio)) {
4835 mutex_enter(&zio->io_vd->vdev_stat_lock);
4836 zio->io_vd->vdev_stat.vs_slow_ios++;
4837 mutex_exit(&zio->io_vd->vdev_stat_lock);
4838
1144586b 4839 (void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
ad796b8a 4840 zio->io_spa, zio->io_vd, &zio->io_bookmark,
4f072827 4841 zio, 0);
ad796b8a
TH
4842 }
4843 }
72f53c56 4844 }
a69052be 4845
b128c09f
BB
4846 if (zio->io_error) {
4847 /*
4848 * If this I/O is attached to a particular vdev,
4849 * generate an error message describing the I/O failure
4850 * at the block level. We ignore these errors if the
4851 * device is currently unavailable.
4852 */
c776b317 4853 if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
2bbec1c9 4854 !vdev_is_dead(zio->io_vd)) {
4f072827
DB
4855 int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
4856 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
4857 if (ret != EALREADY) {
4858 mutex_enter(&zio->io_vd->vdev_stat_lock);
4859 if (zio->io_type == ZIO_TYPE_READ)
4860 zio->io_vd->vdev_stat.vs_read_errors++;
4861 else if (zio->io_type == ZIO_TYPE_WRITE)
4862 zio->io_vd->vdev_stat.vs_write_errors++;
4863 mutex_exit(&zio->io_vd->vdev_stat_lock);
2bbec1c9 4864 }
2bbec1c9 4865 }
34dc7c2f 4866
428870ff
BB
4867 if ((zio->io_error == EIO || !(zio->io_flags &
4868 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
c776b317 4869 zio == zio->io_logical) {
b128c09f
BB
4870 /*
4871 * For logical I/O requests, tell the SPA to log the
4872 * error and generate a logical data ereport.
4873 */
431083f7 4874 spa_log_error(zio->io_spa, &zio->io_bookmark,
493fcce9 4875 BP_GET_LOGICAL_BIRTH(zio->io_bp));
1144586b 4876 (void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
4f072827 4877 zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
b128c09f
BB
4878 }
4879 }
34dc7c2f 4880
c776b317 4881 if (zio->io_error && zio == zio->io_logical) {
b128c09f
BB
4882 /*
4883 * Determine whether zio should be reexecuted. This will
4884 * propagate all the way to the root via zio_notify_parent().
4885 */
c776b317 4886 ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
428870ff 4887 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
b128c09f 4888
428870ff
BB
4889 if (IO_IS_ALLOCATING(zio) &&
4890 !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
b128c09f
BB
4891 if (zio->io_error != ENOSPC)
4892 zio->io_reexecute |= ZIO_REEXECUTE_NOW;
4893 else
4894 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
428870ff 4895 }
b128c09f
BB
4896
4897 if ((zio->io_type == ZIO_TYPE_READ ||
4898 zio->io_type == ZIO_TYPE_FREE) &&
572e2857 4899 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
b128c09f 4900 zio->io_error == ENXIO &&
c776b317
BB
4901 spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
4902 spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
b128c09f
BB
4903 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
4904
4905 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
4906 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
428870ff
BB
4907
4908 /*
4909 * Here is a possibly good place to attempt to do
4910 * either combinatorial reconstruction or error correction
4911 * based on checksums. It also might be a good place
4912 * to send out preliminary ereports before we suspend
4913 * processing.
4914 */
34dc7c2f
BB
4915 }
4916
4917 /*
b128c09f
BB
4918 * If there were logical child errors, they apply to us now.
4919 * We defer this until now to avoid conflating logical child
4920 * errors with errors that happened to the zio itself when
4921 * updating vdev stats and reporting FMA events above.
34dc7c2f 4922 */
b128c09f 4923 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
34dc7c2f 4924
428870ff
BB
4925 if ((zio->io_error || zio->io_reexecute) &&
4926 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
03c6040b 4927 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
c776b317 4928 zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
9babb374
BB
4929
4930 zio_gang_tree_free(&zio->io_gang_tree);
4931
4932 /*
4933 * Godfather I/Os should never suspend.
4934 */
4935 if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
4936 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
a32494d2 4937 zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
9babb374 4938
b128c09f
BB
4939 if (zio->io_reexecute) {
4940 /*
4941 * This is a logical I/O that wants to reexecute.
4942 *
4943 * Reexecute is top-down. When an i/o fails, if it's not
4944 * the root, it simply notifies its parent and sticks around.
4945 * The parent, seeing that it still has children in zio_done(),
4946 * does the same. This percolates all the way up to the root.
4947 * The root i/o will reexecute or suspend the entire tree.
4948 *
4949 * This approach ensures that zio_reexecute() honors
4950 * all the original i/o dependency relationships, e.g.
4951 * parents not executing until children are ready.
4952 */
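 /*
  * Concretely (illustrative): a failed logical child notifies its parent
  * and remains attached; the parent, still holding that child in
  * zio_done(), does the same for its own parent, so the reexecute request
  * percolates upward until the root zio either reexecutes the whole tree
  * or suspends it.
  */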
4953 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
34dc7c2f 4954
9babb374 4955 zio->io_gang_leader = NULL;
b128c09f 4956
d164b209
BB
4957 mutex_enter(&zio->io_lock);
4958 zio->io_state[ZIO_WAIT_DONE] = 1;
4959 mutex_exit(&zio->io_lock);
4960
9babb374
BB
4961 /*
4962 * "The Godfather" I/O monitors its children but is
4963 * not a true parent to them. It will track them through
4964 * the pipeline but severs its ties whenever they get into
4965 * trouble (e.g. suspended). This allows "The Godfather"
4966 * I/O to return status without blocking.
4967 */
3dfb57a3
DB
4968 zl = NULL;
4969 for (pio = zio_walk_parents(zio, &zl); pio != NULL;
4970 pio = pio_next) {
4971 zio_link_t *remove_zl = zl;
4972 pio_next = zio_walk_parents(zio, &zl);
9babb374
BB
4973
4974 if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
4975 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
3dfb57a3 4976 zio_remove_child(pio, zio, remove_zl);
62840030
MA
4977 /*
4978 * This is a rare code path, so we don't
4979 * bother with "next_to_execute".
4980 */
4981 zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
4982 NULL);
9babb374
BB
4983 }
4984 }
4985
d164b209 4986 if ((pio = zio_unique_parent(zio)) != NULL) {
b128c09f
BB
4987 /*
4988 * We're not a root i/o, so there's nothing to do
4989 * but notify our parent. Don't propagate errors
4990 * upward since we haven't permanently failed yet.
4991 */
9babb374 4992 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
b128c09f 4993 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
62840030
MA
4994 /*
4995 * This is a rare code path, so we don't bother with
4996 * "next_to_execute".
4997 */
4998 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
b128c09f
BB
4999 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
5000 /*
5001 * We'd fail again if we reexecuted now, so suspend
5002 * until conditions improve (e.g. device comes online).
5003 */
cec3a0a1 5004 zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
b128c09f
BB
5005 } else {
5006 /*
5007 * Reexecution is potentially a huge amount of work.
5008 * Hand it off to the otherwise-unused claim taskq.
5009 */
a38718a6 5010 ASSERT(taskq_empty_ent(&zio->io_tqent));
7ef5e54e
AL
5011 spa_taskq_dispatch_ent(zio->io_spa,
5012 ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
3bd4df38 5013 zio_reexecute, zio, 0, &zio->io_tqent, NULL);
b128c09f 5014 }
62840030 5015 return (NULL);
34dc7c2f
BB
5016 }
5017
ccec7fbe 5018 ASSERT(list_is_empty(&zio->io_child_list));
b128c09f
BB
5019 ASSERT(zio->io_reexecute == 0);
5020 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
34dc7c2f 5021
428870ff
BB
5022 /*
5023 * Report any checksum errors, since the I/O is complete.
5024 */
5025 while (zio->io_cksum_report != NULL) {
5026 zio_cksum_report_t *zcr = zio->io_cksum_report;
5027 zio->io_cksum_report = zcr->zcr_next;
5028 zcr->zcr_next = NULL;
5029 zcr->zcr_finish(zcr, NULL);
5030 zfs_ereport_free_checksum(zcr);
5031 }
5032
d164b209
BB
5033 /*
5034 * It is the responsibility of the done callback to ensure that this
5035 * particular zio is no longer discoverable for adoption, and as
5036 * such, cannot acquire any new parents.
5037 */
b128c09f
BB
5038 if (zio->io_done)
5039 zio->io_done(zio);
34dc7c2f 5040
d164b209
BB
5041 mutex_enter(&zio->io_lock);
5042 zio->io_state[ZIO_WAIT_DONE] = 1;
5043 mutex_exit(&zio->io_lock);
34dc7c2f 5044
62840030
MA
5045 /*
5046 * We are done executing this zio. We may want to execute a parent
5047 * next. See the comment in zio_notify_parent().
5048 */
5049 zio_t *next_to_execute = NULL;
3dfb57a3
DB
5050 zl = NULL;
5051 for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
5052 zio_link_t *remove_zl = zl;
5053 pio_next = zio_walk_parents(zio, &zl);
5054 zio_remove_child(pio, zio, remove_zl);
62840030 5055 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
b128c09f 5056 }
34dc7c2f 5057
b128c09f
BB
5058 if (zio->io_waiter != NULL) {
5059 mutex_enter(&zio->io_lock);
5060 zio->io_executor = NULL;
5061 cv_broadcast(&zio->io_cv);
5062 mutex_exit(&zio->io_lock);
5063 } else {
5064 zio_destroy(zio);
5065 }
34dc7c2f 5066
62840030 5067 return (next_to_execute);
34dc7c2f
BB
5068}
5069
5070/*
b128c09f
BB
5071 * ==========================================================================
5072 * I/O pipeline definition
5073 * ==========================================================================
34dc7c2f 5074 */
428870ff 5075static zio_pipe_stage_t *zio_pipeline[] = {
b128c09f 5076 NULL,
b128c09f 5077 zio_read_bp_init,
3dfb57a3 5078 zio_write_bp_init,
428870ff
BB
5079 zio_free_bp_init,
5080 zio_issue_async,
3dfb57a3 5081 zio_write_compress,
b5256303 5082 zio_encrypt,
b128c09f 5083 zio_checksum_generate,
03c6040b 5084 zio_nop_write,
67a1b037 5085 zio_brt_free,
428870ff
BB
5086 zio_ddt_read_start,
5087 zio_ddt_read_done,
5088 zio_ddt_write,
5089 zio_ddt_free,
b128c09f
BB
5090 zio_gang_assemble,
5091 zio_gang_issue,
3dfb57a3 5092 zio_dva_throttle,
b128c09f
BB
5093 zio_dva_allocate,
5094 zio_dva_free,
5095 zio_dva_claim,
5096 zio_ready,
5097 zio_vdev_io_start,
5098 zio_vdev_io_done,
5099 zio_vdev_io_assess,
5100 zio_checksum_verify,
5101 zio_done
5102};
c28b2279 5103
9ae529ec 5104
9ae529ec 5105
9ae529ec 5106
fcff0f35
PD
5107/*
5108 * Compare two zbookmark_phys_t's to see which we would reach first in a
5109 * pre-order traversal of the object tree.
5110 *
5111 * This is simple in every case aside from the meta-dnode object. For all other
5112 * objects, we traverse them in order (object 1 before object 2, and so on).
5113 * However, all of these objects are traversed while traversing object 0, since
5114 * the data it points to is the list of objects. Thus, we need to convert to a
5115 * canonical representation so we can compare meta-dnode bookmarks to
5116 * non-meta-dnode bookmarks.
5117 *
5118 * We do this by calculating "equivalents" for each field of the zbookmark.
5119 * zbookmarks outside of the meta-dnode use their own object and level, and
5120 * calculate the level 0 equivalent (the first L0 blkid that is contained in the
5121 * blocks this bookmark refers to) by multiplying their blkid by their span
5122 * (the number of L0 blocks contained within one block at their level).
5123 * zbookmarks inside the meta-dnode calculate their object equivalent
5124 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
5125 * level + 1<<31 (a value larger than any level could ever be) for their level.
5126 * This causes them to always compare before a bookmark in their object
5127 * equivalent, compare appropriately to bookmarks in other objects, and to
5128 * compare appropriately to other bookmarks in the meta-dnode.
5129 */
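/*
 * Worked example (illustrative values, assuming 16K dnode data blocks so
 * dbss = 32, and level-0 bookmarks so the span is 1): a meta-dnode bookmark
 * {object 0, level 0, blkid 10} canonicalizes to zb1obj = 10 * 32 = 320,
 * zb1L0 = 0, zb1level = COMPARE_META_LEVEL, while {object 320, level 0,
 * blkid 5} stays zb2obj = 320, zb2L0 = 5, zb2level = 0. The objects match
 * and 0 < 5, so the dnode block holding dnodes 320-351 sorts before any
 * data block of object 320, exactly as a pre-order traversal visits them.
 */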
5130int
5131zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
5132 const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
5133{
5134 /*
5135 * These variables represent the "equivalent" values for the zbookmark,
5136 * after converting zbookmarks inside the meta dnode to their
5137 * normal-object equivalents.
5138 */
5139 uint64_t zb1obj, zb2obj;
5140 uint64_t zb1L0, zb2L0;
5141 uint64_t zb1level, zb2level;
5142
5143 if (zb1->zb_object == zb2->zb_object &&
5144 zb1->zb_level == zb2->zb_level &&
5145 zb1->zb_blkid == zb2->zb_blkid)
5146 return (0);
9ae529ec 5147
30af21b0
PD
5148 IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
5149 IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
5150
fcff0f35
PD
5151 /*
5152 * BP_SPANB calculates the span in blocks.
5153 */
5154 zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
5155 zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
9ae529ec
CS
5156
5157 if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
fcff0f35
PD
5158 zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
5159 zb1L0 = 0;
5160 zb1level = zb1->zb_level + COMPARE_META_LEVEL;
5161 } else {
5162 zb1obj = zb1->zb_object;
5163 zb1level = zb1->zb_level;
9ae529ec
CS
5164 }
5165
fcff0f35
PD
5166 if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
5167 zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
5168 zb2L0 = 0;
5169 zb2level = zb2->zb_level + COMPARE_META_LEVEL;
5170 } else {
5171 zb2obj = zb2->zb_object;
5172 zb2level = zb2->zb_level;
5173 }
5174
5175 /* Now that we have a canonical representation, do the comparison. */
5176 if (zb1obj != zb2obj)
5177 return (zb1obj < zb2obj ? -1 : 1);
5178 else if (zb1L0 != zb2L0)
5179 return (zb1L0 < zb2L0 ? -1 : 1);
5180 else if (zb1level != zb2level)
5181 return (zb1level > zb2level ? -1 : 1);
5182 /*
5183 * This can (theoretically) happen only if the bookmarks have the same
 5184 * object and level but different blkids, which requires the block sizes
 5185 * to differ; there is presently no way to change the indirect block sizes.
5186 */
5187 return (0);
5188}
5189
5190/*
5191 * This function checks the following: given that last_block is the place that
5192 * our traversal stopped last time, does that guarantee that we've visited
5193 * every node under subtree_root? The raw output of zbookmark_compare
 5194 * cannot answer that directly, so we pass in a modified version of
 5195 * subtree_root: by incrementing its block id and then checking whether
 5196 * that incremented bookmark is at or before last_block, we can tell
 5197 * whether or not having visited last_block implies that all of
 5198 * subtree_root's children have been visited.
5199 */
5200boolean_t
5201zbookmark_subtree_completed(const dnode_phys_t *dnp,
5202 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
5203{
5204 zbookmark_phys_t mod_zb = *subtree_root;
5205 mod_zb.zb_blkid++;
33dba8c7 5206 ASSERT0(last_block->zb_level);
fcff0f35
PD
5207
5208 /* The objset_phys_t isn't before anything. */
5209 if (dnp == NULL)
9ae529ec 5210 return (B_FALSE);
fcff0f35
PD
5211
5212 /*
5213 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
5214 * data block size in sectors, because that variable is only used if
5215 * the bookmark refers to a block in the meta-dnode. Since we don't
5216 * know without examining it what object it refers to, and there's no
5217 * harm in passing in this value in other cases, we always pass it in.
5218 *
5219 * We pass in 0 for the indirect block size shift because zb2 must be
5220 * level 0. The indirect block size is only used to calculate the span
5221 * of the bookmark, but since the bookmark must be level 0, the span is
5222 * always 1, so the math works out.
5223 *
5224 * If you make changes to how the zbookmark_compare code works, be sure
5225 * to make sure that this code still works afterwards.
5226 */
5227 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
5228 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
5229 last_block) <= 0);
9ae529ec
CS
5230}
5231
33dba8c7
AM
5232/*
5233 * This function is similar to zbookmark_subtree_completed(), but returns true
5234 * if subtree_root is equal or ahead of last_block, i.e. still to be done.
5235 */
5236boolean_t
5237zbookmark_subtree_tbd(const dnode_phys_t *dnp,
5238 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
5239{
5240 ASSERT0(last_block->zb_level);
5241 if (dnp == NULL)
5242 return (B_FALSE);
5243 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
5244 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, subtree_root,
5245 last_block) >= 0);
5246}
5247
c28b2279 5248EXPORT_SYMBOL(zio_type_name);
81971b13
BB
5249EXPORT_SYMBOL(zio_buf_alloc);
5250EXPORT_SYMBOL(zio_data_buf_alloc);
5251EXPORT_SYMBOL(zio_buf_free);
5252EXPORT_SYMBOL(zio_data_buf_free);
c28b2279 5253
03fdcb9a 5254ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
ad796b8a 5255 "Max I/O completion time (milliseconds) before marking it as slow");
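/*
 * At runtime this is the zio_slow_io_ms tunable, e.g.
 * /sys/module/zfs/parameters/zio_slow_io_ms on Linux or the
 * vfs.zfs.zio.slow_io_ms sysctl on FreeBSD.
 */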
c409e464 5256
03fdcb9a
MM
5257ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
5258 "Prioritize requeued I/O");
29dee3ee 5259
fdc2d303 5260ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
d1d7e268 5261 "Defer frees starting in this pass");
29dee3ee 5262
fdc2d303 5263ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
d1d7e268 5264 "Don't compress starting in this pass");
29dee3ee 5265
fdc2d303 5266ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
d1d7e268 5267 "Rewrite new bps starting in this pass");
3dfb57a3 5268
03fdcb9a 5269ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
3dfb57a3 5270 "Throttle block allocations in the ZIO pipeline");
638dd5f4 5271
03fdcb9a 5272ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
638dd5f4 5273 "Log all slow ZIOs, not just those with vdevs");