/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2021, Datto, Inc.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *const zio_type_name[ZIO_TYPES] = {
	/*
	 * Note: Linux kernel thread name length is limited
	 * so these names will differ from upstream open zfs.
	 */
	"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl", "z_trim"
};

int zio_dva_throttle_enabled = B_TRUE;
static int zio_deadman_log_all = B_FALSE;

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
static kmem_cache_t *zio_cache;
static kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
static uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif

/* Mark IOs as "slow" if they take longer than 30 seconds */
static int zio_slow_io_ms = (30 * MILLISEC);

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
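
/*
 * Worked example (informal): with 128-byte block pointers
 * (SPA_BLKPTRSHIFT == 7) and a 128K indirect block size
 * (indblkshift == 17), a level-1 indirect block spans
 * BP_SPANB(17, 1) == 1 << 10 == 1024 level-0 blocks.
 */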
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 *
 * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
 * compression (including of metadata). In practice, we don't have this
 * many sync passes, so this has no effect.
 *
 * The original intent was that disabling compression would help the sync
 * passes to converge. However, in practice disabling compression increases
 * the average number of sync passes, because when we turn compression off,
 * many blocks' sizes will change and thus we have to re-allocate (not
 * overwrite) them. It also increases the number of 128KB allocations (e.g.
 * for indirect blocks and spacemaps) because these will not be compressed.
 * The 128K allocations are especially detrimental to performance on highly
 * fragmented systems, which may have very few free segments of this size,
 * and may need to load new metaslabs to satisfy 128K allocations.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
static int zfs_sync_pass_dont_compress = 8; /* don't compress starting in this pass */
static int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

/*
 * Enable smaller cores by excluding metadata
 * allocations as well.
 */
int zio_exclude_metadata = 0;
static int zio_requeue_io_start_cut_in_line = 1;

#ifdef ZFS_DEBUG
static const int zio_buf_debug_limit = 16384;
#else
static const int zio_buf_debug_limit = 0;
#endif

static inline void __zio_execute(zio_t *zio);

static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);

void
zio_init(void)
{
	size_t c;

	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
	 * for each quarter-power of 2.
	 */
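	/*
	 * For example (informal, assuming 4K pages): this yields caches at
	 * every 512-byte step up to 4K, and above that only at sizes aligned
	 * to a quarter of the enclosing power of two -- e.g. 40K, 48K, 56K
	 * and 64K in the 32K-64K range.
	 */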
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t data_cflags, cflags;

		data_cflags = KMC_NODEBUG;
		cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
		    KMC_NODEBUG : 0;

		while (!ISP2(p2))
			p2 &= p2 - 1;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own page,
		 * to eliminate the performance overhead of trapping to the
		 * kernel when modifying a non-watched buffer that shares the
		 * page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
		/*
		 * Here's the problem - on 4K native devices in userland on
		 * Linux using O_DIRECT, buffers must be 4K aligned or I/O
		 * will fail with EINVAL, causing zdb (and others) to coredump.
		 * Since userland probably doesn't need optimized buffer caches,
		 * we just force 4K alignment on everything.
		 */
		align = 8 * SPA_MINBLOCKSIZE;
#else
		if (size < PAGESIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = PAGESIZE;
		}
#endif

		if (align != 0) {
			char name[36];
			if (cflags == data_cflags) {
				/*
				 * Resulting kmem caches would be identical.
				 * Save memory by creating only one.
				 */
				(void) snprintf(name, sizeof (name),
				    "zio_buf_comb_%lu", (ulong_t)size);
				zio_buf_cache[c] = kmem_cache_create(name,
				    size, align, NULL, NULL, NULL, NULL, NULL,
				    cflags);
				zio_data_buf_cache[c] = zio_buf_cache[c];
				continue;
			}
			(void) snprintf(name, sizeof (name), "zio_buf_%lu",
			    (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
			    (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, data_cflags);
		}
	}

	/*
	 * Fill in any holes: each size without its own cache points at
	 * the next larger cache that was actually created.
	 */
	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();

	lz4_init();
}

void
zio_fini(void)
{
	size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;

#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	for (size_t i = 0; i < n; i++) {
		if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
			(void) printf("zio_fini: [%d] %llu != %llu\n",
			    (int)((i + 1) << SPA_MINBLOCKSHIFT),
			    (long long unsigned)zio_buf_cache_allocs[i],
			    (long long unsigned)zio_buf_cache_frees[i]);
	}
#endif

	/*
	 * The same kmem cache can show up multiple times in both zio_buf_cache
	 * and zio_data_buf_cache. Do a wasteful but trivially correct scan to
	 * sort it out.
	 */
	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_buf_cache[j])
				zio_buf_cache[j] = NULL;
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		kmem_cache_t *cache = zio_data_buf_cache[i];
		if (cache == NULL)
			continue;
		for (size_t j = i; j < n; j++) {
			if (cache == zio_data_buf_cache[j])
				zio_data_buf_cache[j] = NULL;
		}
		kmem_cache_destroy(cache);
	}

	for (size_t i = 0; i < n; i++) {
		VERIFY3P(zio_buf_cache[i], ==, NULL);
		VERIFY3P(zio_data_buf_cache[i], ==, NULL);
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();

	lz4_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif

	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}
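
/*
 * Sizing example (informal): both allocators map a request to a cache
 * index as (size - 1) >> SPA_MINBLOCKSHIFT.  With 512-byte minimum
 * blocks, a 4K request yields index (4096 - 1) >> 9 == 7, whose cache
 * holds buffers of (7 + 1) << 9 == 4096 bytes.
 */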

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the amount
 * of ZFS data that shows up in a kernel crashdump.  (Thus reducing the amount
 * of kernel heap dumped to disk when the kernel panics)
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
	atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}

static void
zio_abd_free(void *abd, size_t size)
{
	(void) size;
	abd_free((abd_t *)abd);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_abd = zio->io_abd;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_abd = data;
	zio->io_size = size;
}
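
/*
 * Note (informal): transforms form a LIFO stack.  Each push swaps a new
 * buffer into the zio; zio_pop_transforms() unwinds the stack, running
 * each callback (if any) against the original buffer and freeing the
 * pushed buffer whenever zt_bufsize is nonzero.
 */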

void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_abd, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			abd_free(zio->io_abd);

		zio->io_abd = zt->zt_orig_abd;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks, decompression, and decryption
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		abd_copy(data, zio->io_abd, size);
}

static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
	if (zio->io_error == 0) {
		void *tmp = abd_borrow_buf(data, size);
		int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
		    zio->io_abd, tmp, zio->io_size, size,
		    &zio->io_prop.zp_complevel);
		abd_return_buf_copy(data, tmp, size);

		if (zio_injection_enabled && ret == 0)
			ret = zio_handle_fault_injection(zio, EINVAL);

		if (ret != 0)
			zio->io_error = SET_ERROR(EIO);
	}
}

static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
	int ret;
	void *tmp;
	blkptr_t *bp = zio->io_bp;
	spa_t *spa = zio->io_spa;
	uint64_t dsobj = zio->io_bookmark.zb_objset;
	uint64_t lsize = BP_GET_LSIZE(bp);
	dmu_object_type_t ot = BP_GET_TYPE(bp);
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t no_crypt = B_FALSE;

	ASSERT(BP_USES_CRYPT(bp));
	ASSERT3U(size, !=, 0);

	if (zio->io_error != 0)
		return;

	/*
	 * Verify the cksum of MACs stored in an indirect bp. It will always
	 * be possible to verify this since it does not require an encryption
	 * key.
	 */
	if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
		zio_crypt_decode_mac_bp(bp, mac);

		if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
			/*
			 * We haven't decompressed the data yet, but
			 * zio_crypt_do_indirect_mac_checksum() requires
			 * decompressed data to be able to parse out the MACs
			 * from the indirect block. We decompress it now and
			 * throw away the result after we are finished.
			 */
			tmp = zio_buf_alloc(lsize);
			ret = zio_decompress_data(BP_GET_COMPRESS(bp),
			    zio->io_abd, tmp, zio->io_size, lsize,
			    &zio->io_prop.zp_complevel);
			if (ret != 0) {
				ret = SET_ERROR(EIO);
				goto error;
			}
			ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
			    tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
			zio_buf_free(tmp, lsize);
		} else {
			ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
			    zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
		}
		abd_copy(data, zio->io_abd, size);

		if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
			ret = zio_handle_decrypt_injection(spa,
			    &zio->io_bookmark, ot, ECKSUM);
		}
		if (ret != 0)
			goto error;

		return;
	}

	/*
	 * If this is an authenticated block, just check the MAC. It would be
	 * nice to separate this out into its own flag, but for the moment
	 * enum zio_flag is out of bits.
	 */
	if (BP_IS_AUTHENTICATED(bp)) {
		if (ot == DMU_OT_OBJSET) {
			ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
			    dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
		} else {
			zio_crypt_decode_mac_bp(bp, mac);
			ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
			    zio->io_abd, size, mac);
			if (zio_injection_enabled && ret == 0) {
				ret = zio_handle_decrypt_injection(spa,
				    &zio->io_bookmark, ot, ECKSUM);
			}
		}
		abd_copy(data, zio->io_abd, size);

		if (ret != 0)
			goto error;

		return;
	}

	zio_crypt_decode_params_bp(bp, salt, iv);

	if (ot == DMU_OT_INTENT_LOG) {
		tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
		zio_crypt_decode_mac_zil(tmp, mac);
		abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
	} else {
		zio_crypt_decode_mac_bp(bp, mac);
	}

	ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
	    BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
	    zio->io_abd, &no_crypt);
	if (no_crypt)
		abd_copy(data, zio->io_abd, size);

	if (ret != 0)
		goto error;

	return;

error:
	/* assert that the key was found unless this was speculative */
	ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));

	/*
	 * If there was a decryption / authentication error return EIO as
	 * the io_error. If this was not a speculative zio, create an ereport.
	 */
	if (ret == ECKSUM) {
		zio->io_error = SET_ERROR(EIO);
		if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
			spa_log_error(spa, &zio->io_bookmark);
			(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
			    spa, NULL, &zio->io_bookmark, zio, 0);
		}
	} else {
		zio->io_error = ret;
	}
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
	list_t *pl = &cio->io_parent_list;

	*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_child == cio);
	return ((*zl)->zl_parent);
}
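
/*
 * Usage sketch (informal): the zio_link_t cursor must start out NULL
 * and is advanced on each call, e.g.:
 *
 *	zio_link_t *zl = NULL;
 *	zio_t *pio;
 *	while ((pio = zio_walk_parents(cio, &zl)) != NULL)
 *		... operate on pio ...
 */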

zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
	list_t *cl = &pio->io_child_list;

	ASSERT(MUTEX_HELD(&pio->io_lock));

	*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
	if (*zl == NULL)
		return (NULL);

	ASSERT((*zl)->zl_parent == pio);
	return ((*zl)->zl_child);
}

zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_link_t *zl = NULL;
	zio_t *pio = zio_walk_parents(cio, &zl);

	VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
	return (pio);
}

void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT3S(cio->io_child_type, <=, pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&pio->io_lock);
	mutex_enter(&cio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&cio->io_lock);
	mutex_exit(&pio->io_lock);
	kmem_cache_free(zio_link_cache, zl);
}

static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
		if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
			continue;

		uint64_t *countp = &zio->io_children[c][wait];
		if (*countp != 0) {
			zio->io_stage >>= 1;
			ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
			zio->io_stall = countp;
			waiting = B_TRUE;
			break;
		}
	}
	mutex_exit(&zio->io_lock);
	return (waiting);
}
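
/*
 * Note (informal): on a stall, io_stage is shifted back one stage so
 * that when the zio is re-dispatched by zio_notify_parent() it resumes
 * at the stage that blocked.  The childbits mask is built from
 * ZIO_CHILD_*_BIT values, e.g. ZIO_CHILD_LOGICAL_BIT | ZIO_CHILD_GANG_BIT.
 */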

__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
    zio_t **next_to_executep)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		zio_taskq_type_t type =
		    pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
		    ZIO_TASKQ_INTERRUPT;
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);

		/*
		 * If we can tell the caller to execute this parent next, do
		 * so. Otherwise dispatch the parent zio as its own task.
		 *
		 * Having the caller execute the parent when possible reduces
		 * locking on the zio taskq's, reduces context switch
		 * overhead, and has no recursion penalty. Note that one
		 * read from disk typically causes at least 3 zio's: a
		 * zio_null(), the logical zio_read(), and then a physical
		 * zio. When the physical ZIO completes, we are able to call
		 * zio_done() on all 3 of these zio's from one invocation of
		 * zio_execute() by returning the parent back to
		 * zio_execute(). Since the parent isn't executed until this
		 * thread returns back to zio_execute(), the caller should do
		 * so promptly.
		 *
		 * In other cases, dispatching the parent prevents
		 * overflowing the stack when we have deeply nested
		 * parent-child relationships, as we do with the "mega zio"
		 * of writes for spa_sync(), and the chain of ZIL blocks.
		 */
		if (next_to_executep != NULL && *next_to_executep == NULL) {
			*next_to_executep = pio;
		} else {
			zio_taskq_dispatch(pio, type, B_FALSE);
		}
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

int
zio_bookmark_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
		return (-1);
	if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
		return (1);

	if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
		return (-1);
	if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
		return (1);

	if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
		return (-1);
	if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
		return (1);

	if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
		return (-1);
	if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}
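
/*
 * Note (informal): this comparator defines a total order -- first by
 * bookmark (objset, object, level, blkid), then by the zio's address,
 * so distinct zios with identical bookmarks still compare unequal, as
 * AVL tree insertion requires.
 */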

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
    void *private, zio_type_t type, zio_priority_t priority,
    enum zio_flag flags, vdev_t *vd, uint64_t offset,
    const zbookmark_phys_t *zb, enum zio_stage stage,
    enum zio_stage pipeline)
{
	zio_t *zio;

	IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	memset(zio, 0, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));
	metaslab_trace_init(&zio->io_alloc_list);

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_abd = zio->io_abd = data;
	zio->io_orig_size = zio->io_size = psize;
	zio->io_lsize = lsize;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;
	zio->io_pipeline_trace = ZIO_STAGE_OPEN;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		zio->io_metaslab_class = pio->io_metaslab_class;
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	taskq_init_ent(&zio->io_tqent);

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	metaslab_trace_fini(&zio->io_alloc_list);
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}
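
/*
 * Usage sketch (informal): a root zio commonly collects a batch of
 * children so the caller can wait for all of them at once:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, ...));
 *	error = zio_wait(rio);
 */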

static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
    enum blk_verify_flag blk_verify, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	switch (blk_verify) {
	case BLK_VERIFY_HALT:
		dprintf_bp(bp, "blkptr at %p dprintf_bp():", bp);
		zfs_panic_recover("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_LOG:
		zfs_dbgmsg("%s: %s", spa_name(spa), buf);
		break;
	case BLK_VERIFY_ONLY:
		break;
	}

	return (1);
}

/*
 * Verify the block pointer fields contain reasonable values.  This means
 * it only contains known object types, checksum/compression identifiers,
 * block sizes within the maximum allowed limits, valid DVAs, etc.
 *
 * If everything checks out B_TRUE is returned.  The blk_verify argument
 * controls the behavior when an invalid field is detected.
 *
 * Modes for zfs_blkptr_verify:
 *   1) BLK_VERIFY_ONLY (evaluate the block)
 *   2) BLK_VERIFY_LOG (evaluate the block and log problems)
 *   3) BLK_VERIFY_HALT (call zfs_panic_recover on error)
 */
boolean_t
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp, boolean_t config_held,
    enum blk_verify_flag blk_verify)
{
	int errors = 0;

	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %p has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %p has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %p has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %p has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
		    "blkptr at %p has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	if (BP_IS_EMBEDDED(bp)) {
		if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %p has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
	}

	/*
	 * Do not verify individual DVAs if the config is not trusted. This
	 * will be done once the zio is executed in vdev_mirror_map_alloc.
	 */
	if (!spa->spa_trust_config)
		return (errors == 0);

	if (!config_held)
		spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
	else
		ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the blk_birth and
	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
	 * that are in the log) to be arbitrarily large.
	 */
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		const dva_t *dva = &bp->blk_dva[i];
		uint64_t vdevid = DVA_GET_VDEV(dva);

		if (vdevid >= spa->spa_root_vdev->vdev_children) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %p DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (vd == NULL) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %p DVA %u has invalid VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_hole_ops) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %p DVA %u has hole VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		uint64_t offset = DVA_GET_OFFSET(dva);
		uint64_t asize = DVA_GET_ASIZE(dva);
		if (DVA_GET_GANG(dva))
			asize = vdev_gang_header_asize(vd);
		if (offset + asize > vd->vdev_asize) {
			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
			    "blkptr at %p DVA %u has invalid OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
	if (errors > 0)
		dprintf_bp(bp, "blkptr at %p dprintf_bp():", bp);
	if (!config_held)
		spa_config_exit(spa, SCL_VDEV, bp);

	return (errors == 0);
}
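
/*
 * Usage sketch (informal): callers that cannot tolerate a bad block
 * pointer halt, while best-effort consumers merely log:
 *
 *	(void) zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_HALT);
 */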

boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
	(void) bp;
	uint64_t vdevid = DVA_GET_VDEV(dva);

	if (vdevid >= spa->spa_root_vdev->vdev_children)
		return (B_FALSE);

	vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
	if (vd == NULL)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_hole_ops)
		return (B_FALSE);

	if (vd->vdev_ops == &vdev_missing_ops) {
		return (B_FALSE);
	}

	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t asize = DVA_GET_ASIZE(dva);

	if (DVA_GET_GANG(dva))
		asize = vdev_gang_header_asize(vd);
	if (offset + asize > vd->vdev_asize)
		return (B_FALSE);

	return (B_TRUE);
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *children_ready,
    zio_done_func_t *physdone, zio_done_func_t *done,
    void *private, zio_priority_t priority, enum zio_flag flags,
    const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_children_ready = children_ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP.  But we may need the data to
	 * verify a dedup hit (if requested).  In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim). Encrypted
	 * dedup blocks need data as well so we also disable dedup in this
	 * case.
	 */
	if (data == NULL &&
	    (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync() keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	(void) zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_HALT);

	/*
	 * The check for EMBEDDED is a performance optimization.  We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately.  Otherwise, put them on the
	 * in-memory list for later processing.
	 *
	 * Note that we only defer frees after zfs_sync_pass_deferred_free
	 * when the log space map feature is disabled. [see relevant comment
	 * in spa_sync_iterate_to_convergence()]
	 */
	if (BP_IS_GANG(bp) ||
	    BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
	    !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
	}
}

/*
 * To improve performance, this function may return NULL if we were able
 * to do the free immediately.  This avoids the cost of creating a zio
 * (and linking it to the parent, etc).
 */
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    enum zio_flag flags)
{
	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);

	if (BP_IS_EMBEDDED(bp))
		return (NULL);

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);
	dsl_scan_freed(spa, bp);

	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp)) {
		/*
		 * GANG and DEDUP blocks can induce a read (for the gang block
		 * header, or the DDT), so issue them asynchronously so that
		 * this thread is not tied up.
		 */
		enum zio_stage stage =
		    ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;

		return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
		    BP_GET_PSIZE(bp), NULL, NULL,
		    ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
		    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
	} else {
		metaslab_free(spa, bp, txg, B_FALSE);
		return (NULL);
	}
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	(void) zfs_blkptr_verify(spa, bp, flags & ZIO_FLAG_CONFIG_WRITER,
	    BLK_VERIFY_HALT);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
	    spa_min_claim_txg(spa));
	ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(8) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
	    flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
	ASSERT0(zio->io_queued_timestamp);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, flags));
	}

	return (zio);
}
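
/*
 * Note (informal): for interior vdevs, zio_ioctl() recurses, hanging
 * one ioctl zio per child off a common null zio so that a single
 * command (e.g. DKIOCFLUSHWRITECACHE from zio_flush()) fans out to
 * every leaf vdev underneath.
 */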

zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    zio_done_func_t *done, void *private, zio_priority_t priority,
    enum zio_flag flags, enum trim_flag trim_flags)
{
	zio_t *zio;

	ASSERT0(vd->vdev_children);
	ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	ASSERT3U(size, !=, 0);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
	    private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
	    vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
	zio->io_trim_flags = trim_flags;

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    abd_t *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
	    private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
	    offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		abd_t *wbuf = abd_alloc_sametype(data, size);
		abd_copy(wbuf, data, size);

		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    abd_t *data, uint64_t size, int type, zio_priority_t priority,
    enum zio_flag flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	/*
	 * vdev child I/Os do not propagate their error to the parent.
	 * Therefore, for correct operation the caller *must* check for
	 * and handle the error in the child i/o's done callback.
	 * The only exceptions are i/os that we don't care about
	 * (OPTIONAL or REPAIR).
	 */
	ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
	    done != NULL);

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_ops->vdev_op_leaf) {
		ASSERT0(vd->vdev_children);
		offset += VDEV_LABEL_START_SIZE;
	}

	flags |= ZIO_VDEV_CHILD_FLAGS(pio);

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	/*
	 * If we're creating a child I/O that is not associated with a
	 * top-level vdev, then the child zio is not an allocating I/O.
	 * If this is a retried I/O then we ignore it since we will
	 * have already processed the original allocating I/O.
	 */
	if (flags & ZIO_FLAG_IO_ALLOCATING &&
	    (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
		ASSERT(pio->io_metaslab_class != NULL);
		ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
		ASSERT(type == ZIO_TYPE_WRITE);
		ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
		ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
		    pio->io_child_type == ZIO_CHILD_GANG);

		flags &= ~ZIO_FLAG_IO_ALLOCATING;
	}

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);

	zio->io_physdone = pio->io_physdone;
	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
		zio->io_logical->io_phys_children++;

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
    zio_type_t type, zio_priority_t priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
	    NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}
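
/*
 * Note (informal): zio_flush() is fire-and-forget.  CANFAIL and
 * DONT_PROPAGATE ensure that devices which reject the cache-flush
 * ioctl (e.g. those without a volatile write cache) do not fail the
 * parent zio.
 */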

void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT3P(zio->io_executor, ==, NULL);
	ASSERT3U(zio->io_orig_size, ==, zio->io_size);
	ASSERT3U(size, <=, zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp)) {
		/* we are not doing a raw write */
		ASSERT3U(zio->io_size, ==, zio->io_lsize);
		zio->io_orig_size = zio->io_size = zio->io_lsize = size;
	}
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

static zio_t *
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	uint64_t psize =
	    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);

	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
		    psize, psize, zio_decompress);
	}

	if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
	    BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL) {
		zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
		    psize, psize, zio_decrypt);
	}

	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
		int psize = BPE_GET_PSIZE(bp);
		void *data = abd_borrow_buf(zio->io_abd, psize);

		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		decode_embedded_bp_compressed(bp, data);
		abd_return_buf_copy(zio->io_abd, data, psize);
	} else {
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
	}

	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (zio);
}
1573
static zio_t *
zio_write_bp_init(zio_t *zio)
{
	if (!IO_IS_ALLOCATING(zio))
		return (zio);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		blkptr_t *bp = zio->io_bp;
		zio_prop_t *zp = &zio->io_prop;

		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (BP_IS_EMBEDDED(bp))
			return (zio);

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (zio);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (zio);

		ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
		    !zp->zp_encrypt) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (zio);
		}

		/*
		 * We were unable to handle this as an override bp;
		 * treat it as a regular write I/O.
		 */
		zio->io_bp_override = NULL;
		*bp = zio->io_bp_orig;
		zio->io_pipeline = zio->io_orig_pipeline;
	}

	return (zio);
}

static zio_t *
zio_write_compress(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_lsize;
	uint64_t psize = zio->io_size;
	int pass = 1;

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
	    ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
		return (NULL);
	}

	if (!IO_IS_ALLOCATING(zio))
		return (zio);

	if (zio->io_children_ready != NULL) {
		/*
		 * Now that all our children are ready, run the callback
		 * associated with this zio in case it wants to modify the
		 * data to be written.
		 */
		ASSERT3U(zp->zp_level, >, 0);
		zio->io_children_ready(zio);
	}

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
	ASSERT(zio->io_bp_override == NULL);

	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass >= zfs_sync_pass_dont_compress)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	/* If it's a compressed write that is not raw, compress the buffer. */
	if (compress != ZIO_COMPRESS_OFF &&
	    !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize,
		    zp->zp_complevel);
		if (psize == 0 || psize >= lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else if (!zp->zp_dedup && !zp->zp_encrypt &&
		    psize <= BPE_PAYLOAD_SIZE &&
		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
			encode_embedded_bp_compressed(bp,
			    cbuf, compress, lsize, psize);
			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
			BP_SET_TYPE(bp, zio->io_prop.zp_type);
			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
			zio_buf_free(cbuf, lsize);
			bp->blk_birth = zio->io_txg;
			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_EMBEDDED_DATA));
			return (zio);
		} else {
			/*
			 * Round compressed size up to the minimum allocation
			 * size of the smallest-ashift device, and zero the
			 * tail.  This ensures that the compressed size of the
			 * BP (and thus compressratio property) are correct,
			 * in that we charge for the padding used to fill out
			 * the last sector.
			 */
			ASSERT3U(spa->spa_min_alloc, >=, SPA_MINBLOCKSHIFT);
			size_t rounded = (size_t)roundup(psize,
			    spa->spa_min_alloc);
			if (rounded >= lsize) {
				compress = ZIO_COMPRESS_OFF;
				zio_buf_free(cbuf, lsize);
				psize = lsize;
			} else {
				abd_t *cdata = abd_get_from_buf(cbuf, lsize);
				abd_take_ownership_of_buf(cdata, B_TRUE);
				abd_zero_off(cdata, psize, rounded - psize);
				psize = rounded;
				zio_push_transform(zio, cdata,
				    psize, lsize, NULL);
			}
		}

		/*
		 * We were unable to handle this as an override bp;
		 * treat it as a regular write I/O.
		 */
		zio->io_bp_override = NULL;
		*bp = zio->io_bp_orig;
		zio->io_pipeline = zio->io_orig_pipeline;

	} else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
	    zp->zp_type == DMU_OT_DNODE) {
		/*
		 * The DMU actually relies on the zio layer's compression
		 * to free metadnode blocks that have had all contained
		 * dnodes freed.  As a result, even when doing a raw
		 * receive, we must check whether the block can be compressed
		 * to a hole.
		 */
		psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
		    zio->io_abd, NULL, lsize, zp->zp_complevel);
		if (psize == 0 || psize >= lsize)
			compress = ZIO_COMPRESS_OFF;
	} else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
	    !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
		/*
		 * If we are raw receiving an encrypted dataset we should not
		 * take this codepath because it will change the on-disk block
		 * and decryption will fail.
		 */
		size_t rounded = MIN((size_t)roundup(psize,
		    spa->spa_min_alloc), lsize);

		if (rounded != psize) {
			abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
			abd_zero_off(cdata, psize, rounded - psize);
			abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
			psize = rounded;
			zio_push_transform(zio, cdata,
			    psize, rounded, NULL);
		}
	} else {
		ASSERT3U(psize, !=, 0);
	}

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
	    BP_GET_PSIZE(bp) == psize &&
	    pass >= zfs_sync_pass_rewrite) {
		VERIFY3U(psize, !=, 0);
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;

		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		if (zio->io_bp_orig.blk_birth != 0 &&
		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, zp->zp_type);
			BP_SET_LEVEL(bp, zp->zp_level);
			BP_SET_BIRTH(bp, zio->io_txg, 0);
		}
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			ASSERT(!zp->zp_encrypt ||
			    DMU_OT_IS_ENCRYPTED(zp->zp_type));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
		if (zp->zp_nopwrite) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
		}
	}
	return (zio);
}

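/*
 * Worked example for the psize rounding above (values illustrative): on
 * a pool whose smallest-ashift vdev yields spa_min_alloc = 4096, an 8K
 * block that compresses to 5000 bytes rounds up to
 * roundup(5000, 4096) = 8192, which is not smaller than lsize, so the
 * compression is discarded and the block is written uncompressed.  Had
 * it compressed to 3000 bytes, psize would become 4096 and the
 * 1096-byte tail would be zeroed, so the BP (and the compressratio
 * property) charge for the sector padding.
 */
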
static zio_t *
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);

	return (zio);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = (cutinline ? TQ_FRONT : 0);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq if
	 * available.
	 */
	if ((zio->io_priority == ZIO_PRIORITY_NOW ||
	    zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time.  It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
	 */
	ASSERT(taskq_empty_ent(&zio->io_tqent));
	spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags,
	    &zio->io_tqent);
}

static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
	spa_t *spa = zio->io_spa;

	taskq_t *tq = taskq_of_curthread();

	for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
		spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
		uint_t i;
		for (i = 0; i < tqs->stqs_count; i++) {
			if (tqs->stqs_taskq[i] == tq)
				return (B_TRUE);
		}
	}

	return (B_FALSE);
}

static zio_t *
zio_issue_async(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);

	return (NULL);
}

void
zio_interrupt(void *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}

void
zio_delay_interrupt(zio_t *zio)
{
	/*
	 * The timeout_generic() function isn't defined in userspace, so
	 * rather than trying to implement the function, the zio delay
	 * functionality has been disabled for userspace builds.
	 */

#ifdef _KERNEL
	/*
	 * If io_target_timestamp is zero, then no delay has been registered
	 * for this IO, so we jump to the end of this function and "skip"
	 * the delay, issuing it directly to the zio layer.
	 */
	if (zio->io_target_timestamp != 0) {
		hrtime_t now = gethrtime();

		if (now >= zio->io_target_timestamp) {
			/*
			 * This IO has already taken longer than the target
			 * delay to complete, so we don't want to delay it
			 * any longer; we "miss" the delay and issue it
			 * directly to the zio layer.  This is likely due to
			 * the target latency being set to a value less than
			 * the underlying hardware can satisfy (e.g. delay
			 * set to 1ms, but the disks take 10ms to complete an
			 * IO request).
			 */

			DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
			    hrtime_t, now);

			zio_interrupt(zio);
		} else {
			taskqid_t tid;
			hrtime_t diff = zio->io_target_timestamp - now;
			clock_t expire_at_tick = ddi_get_lbolt() +
			    NSEC_TO_TICK(diff);

			DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
			    hrtime_t, now, hrtime_t, diff);

			if (NSEC_TO_TICK(diff) == 0) {
				/* Our delay is less than a jiffy - just spin */
				zfs_sleep_until(zio->io_target_timestamp);
				zio_interrupt(zio);
			} else {
				/*
				 * Use taskq_dispatch_delay() in the place of
				 * OpenZFS's timeout_generic().
				 */
				tid = taskq_dispatch_delay(system_taskq,
				    zio_interrupt, zio, TQ_NOSLEEP,
				    expire_at_tick);
				if (tid == TASKQID_INVALID) {
					/*
					 * Couldn't allocate a task.  Just
					 * finish the zio without a delay.
					 */
					zio_interrupt(zio);
				}
			}
		}
		return;
	}
#endif
	DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
	zio_interrupt(zio);
}

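/*
 * For illustration, the delays handled above typically come from fault
 * injection rather than normal I/O: a command of the form
 * "zinject -d <vdev> -D 500:1 <pool>" requests roughly 500ms of added
 * latency on a single lane, and the injection code computes the
 * io_target_timestamp that zio_delay_interrupt() honors (exact syntax
 * per zinject(8); this sketch assumes the Linux port's semantics).
 */
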
static void
zio_deadman_impl(zio_t *pio, int ziodepth)
{
	zio_t *cio, *cio_next;
	zio_link_t *zl = NULL;
	vdev_t *vd = pio->io_vd;

	if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
		vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
		zbookmark_phys_t *zb = &pio->io_bookmark;
		uint64_t delta = gethrtime() - pio->io_timestamp;
		uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);

		zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
		    "delta=%llu queued=%llu io=%llu "
		    "path=%s "
		    "last=%llu type=%d "
		    "priority=%d flags=0x%x stage=0x%x "
		    "pipeline=0x%x pipeline-trace=0x%x "
		    "objset=%llu object=%llu "
		    "level=%llu blkid=%llu "
		    "offset=%llu size=%llu "
		    "error=%d",
		    ziodepth, pio, pio->io_timestamp,
		    (u_longlong_t)delta, pio->io_delta, pio->io_delay,
		    vd ? vd->vdev_path : "NULL",
		    vq ? vq->vq_io_complete_ts : 0, pio->io_type,
		    pio->io_priority, pio->io_flags, pio->io_stage,
		    pio->io_pipeline, pio->io_pipeline_trace,
		    (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
		    (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
		    (u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
		    pio->io_error);
		(void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
		    pio->io_spa, vd, zb, pio, 0);

		if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
		    taskq_empty_ent(&pio->io_tqent)) {
			zio_interrupt(pio);
		}
	}

	mutex_enter(&pio->io_lock);
	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio, &zl);
		zio_deadman_impl(cio, ziodepth + 1);
	}
	mutex_exit(&pio->io_lock);
}

/*
 * Log the critical information describing this zio and all of its children
 * using the zfs_dbgmsg() interface, then post a deadman event for the ZED.
 */
void
zio_deadman(zio_t *pio, char *tag)
{
	spa_t *spa = pio->io_spa;
	char *name = spa_name(spa);

	if (!zfs_deadman_enabled || spa_suspended(spa))
		return;

	zio_deadman_impl(pio, 0);

	switch (spa_get_deadman_failmode(spa)) {
	case ZIO_FAILURE_MODE_WAIT:
		zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
		break;

	case ZIO_FAILURE_MODE_CONTINUE:
		zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
		break;

	case ZIO_FAILURE_MODE_PANIC:
		fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
		break;
	}
}

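/*
 * For illustration, the deadman behavior above is tunable at runtime via
 * module parameters (paths and defaults may vary by platform), e.g.:
 *
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 *	echo 60000 > /sys/module/zfs/parameters/zfs_deadman_ziotime_ms
 *
 * selects the ZIO_FAILURE_MODE_CONTINUE path and lowers the per-zio
 * hang threshold to 60 seconds.
 */
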
/*
 * Execute the I/O pipeline until one of the following occurs:
 * (1) the I/O completes; (2) the pipeline stalls waiting for
 * dependent child I/Os; (3) the I/O issues, so we're waiting
 * for an I/O completion interrupt; (4) the I/O is delegated by
 * vdev-level caching or aggregation; (5) the I/O is deferred
 * due to vdev-level queueing; (6) the I/O is handed off to
 * another thread.  In all cases, the pipeline stops whenever
 * there's no CPU work; it never burns a thread in cv_wait_io().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
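/*
 * To illustrate the stage-advance idiom used by __zio_execute() below
 * (bit values hypothetical): each stage is a distinct power of two and
 * io_pipeline is a mask of enabled stages.  With pipeline = 0b10011 and
 * stage = 0b00001, the loop
 *
 *	do {
 *		stage <<= 1;
 *	} while ((stage & pipeline) == 0);
 *
 * stops at 0b00010; starting from 0b00010 it would skip the two clear
 * bits and stop at 0b10000.  This is why ISP2(stage) is asserted and
 * why zio_pipeline[] is indexed by highbit64(stage) - 1.
 */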
static zio_pipe_stage_t *zio_pipeline[];

/*
 * zio_execute() is a wrapper around the static function
 * __zio_execute() so that we can force __zio_execute() to be
 * inlined.  This reduces stack overhead which is important
 * because __zio_execute() is called recursively in several zio
 * code paths.  zio_execute() itself cannot be inlined because
 * it is externally visible.
 */
void
zio_execute(void *zio)
{
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	__zio_execute(zio);
	spl_fstrans_unmark(cookie);
}

/*
 * Used to determine if in the current context the stack is sized large
 * enough to allow zio_execute() to be called recursively.  A minimum
 * stack size of 16K is required to avoid needing to re-dispatch the zio.
 */
static boolean_t
zio_execute_stack_check(zio_t *zio)
{
#if !defined(HAVE_LARGE_STACKS)
	dsl_pool_t *dp = spa_get_dsl(zio->io_spa);

	/* Executing in txg_sync_thread() context. */
	if (dp && curthread == dp->dp_tx.tx_sync_thread)
		return (B_TRUE);

	/* Pool initialization outside of zio_taskq context. */
	if (dp && spa_is_initializing(dp->dp_spa) &&
	    !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
	    !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
		return (B_TRUE);
#else
	(void) zio;
#endif /* HAVE_LARGE_STACKS */

	return (B_FALSE);
}

__attribute__((always_inline))
static inline void
__zio_execute(zio_t *zio)
{
	ASSERT3U(zio->io_queued_timestamp, >, 0);

	while (zio->io_stage < ZIO_STAGE_DONE) {
		enum zio_stage pipeline = zio->io_pipeline;
		enum zio_stage stage = zio->io_stage;

		zio->io_executor = curthread;

		ASSERT(!MUTEX_HELD(&zio->io_lock));
		ASSERT(ISP2(stage));
		ASSERT(zio->io_stall == NULL);

		do {
			stage <<= 1;
		} while ((stage & pipeline) == 0);

		ASSERT(stage <= ZIO_STAGE_DONE);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * or may wait for an I/O that needs an interrupt thread
		 * to complete, issue async to avoid deadlock.
		 *
		 * For VDEV_IO_START, we cut in line so that the io will
		 * be sent to disk promptly.
		 */
		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		/*
		 * If the current context doesn't have large enough stacks
		 * the zio must be issued asynchronously to prevent overflow.
		 */
		if (zio_execute_stack_check(zio)) {
			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
			    zio_requeue_io_start_cut_in_line : B_FALSE;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
			return;
		}

		zio->io_stage = stage;
		zio->io_pipeline_trace |= zio->io_stage;

		/*
		 * The zio pipeline stage returns the next zio to execute
		 * (typically the same as this one), or NULL if we should
		 * stop.
		 */
		zio = zio_pipeline[highbit64(stage) - 1](zio);

		if (zio == NULL)
			return;
	}
}


/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	/*
	 * Some routines, like zio_free_sync(), may return a NULL zio
	 * to avoid the performance overhead of creating and then destroying
	 * an unneeded zio.  For the callers' simplicity, we accept a NULL
	 * zio and ignore it.
	 */
	if (zio == NULL)
		return (0);

	long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
	int error;

	ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
	ASSERT3P(zio->io_executor, ==, NULL);

	zio->io_waiter = curthread;
	ASSERT0(zio->io_queued_timestamp);
	zio->io_queued_timestamp = gethrtime();

	__zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL) {
		error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
		    ddi_get_lbolt() + timeout);

		if (zfs_deadman_enabled && error == -1 &&
		    gethrtime() - zio->io_queued_timestamp >
		    spa_deadman_ziotime(zio->io_spa)) {
			mutex_exit(&zio->io_lock);
			timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
			zio_deadman(zio, FTAG);
			mutex_enter(&zio->io_lock);
		}
	}
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	/*
	 * See comment in zio_wait().
	 */
	if (zio == NULL)
		return;

	ASSERT3P(zio->io_executor, ==, NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		zio_t *pio;

		/*
		 * This is a logical async I/O with no parent to wait for it.
		 * We add it to the spa_async_zio_root "Godfather" I/O,
		 * which ensures it completes prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;
		pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];

		zio_add_child(pio, zio);
	}

	ASSERT0(zio->io_queued_timestamp);
	zio->io_queued_timestamp = gethrtime();
	__zio_execute(zio);
}

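/*
 * Usage sketch (illustrative): synchronous callers block on a single
 * zio, while asynchronous callers fire children under a root zio and
 * wait once.  Here done_cb and arg are hypothetical callback names:
 *
 *	error = zio_wait(zio_read(NULL, spa, bp, abd, size, NULL, NULL,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 *
 *	zio_t *root = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(root, spa, bp, abd, size, done_cb, arg,
 *	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 *	error = zio_wait(root);
 *
 * A parentless logical zio passed to zio_nowait() is instead adopted by
 * the pool's "Godfather" I/O, as described above.
 */
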
/*
 * ==========================================================================
 * Reexecute, cancel, or suspend/resume failed I/O
 * ==========================================================================
 */

static void
zio_reexecute(void *arg)
{
	zio_t *pio = arg;
	zio_t *cio, *cio_next;

	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
	ASSERT(pio->io_gang_leader == NULL);
	ASSERT(pio->io_gang_tree == NULL);

	pio->io_flags = pio->io_orig_flags;
	pio->io_stage = pio->io_orig_stage;
	pio->io_pipeline = pio->io_orig_pipeline;
	pio->io_reexecute = 0;
	pio->io_flags |= ZIO_FLAG_REEXECUTED;
	pio->io_pipeline_trace = 0;
	pio->io_error = 0;
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_state[w] = 0;
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		pio->io_child_error[c] = 0;

	if (IO_IS_ALLOCATING(pio))
		BP_ZERO(pio->io_bp);

	/*
	 * As we reexecute pio's children, new children could be created.
	 * New children go to the head of pio's io_child_list, however,
	 * so we will (correctly) not reexecute them.  The key is that
	 * the remainder of pio's io_child_list, from 'cio_next' onward,
	 * cannot be affected by any side effects of reexecuting 'cio'.
	 */
	zio_link_t *zl = NULL;
	mutex_enter(&pio->io_lock);
	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio, &zl);
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			pio->io_children[cio->io_child_type][w]++;
		mutex_exit(&pio->io_lock);
		zio_reexecute(cio);
		mutex_enter(&pio->io_lock);
	}
	mutex_exit(&pio->io_lock);

	/*
	 * Now that all children have been reexecuted, execute the parent.
	 * We don't reexecute "The Godfather" I/O here as it's the
	 * responsibility of the caller to wait on it.
	 */
	if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
		pio->io_queued_timestamp = gethrtime();
		__zio_execute(pio);
	}
}

void
zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
{
	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
		    "failure and the failure mode property for this pool "
		    "is set to panic.", spa_name(spa));

	cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O "
	    "failure and has been suspended.\n", spa_name(spa));

	(void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
	    NULL, NULL, 0);

	mutex_enter(&spa->spa_suspend_lock);

	if (spa->spa_suspend_zio_root == NULL)
		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);

	spa->spa_suspended = reason;

	if (zio != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
		ASSERT(zio != spa->spa_suspend_zio_root);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(zio_unique_parent(zio) == NULL);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio_add_child(spa->spa_suspend_zio_root, zio);
	}

	mutex_exit(&spa->spa_suspend_lock);
}

int
zio_resume(spa_t *spa)
{
	zio_t *pio;

	/*
	 * Reexecute all previously suspended i/o.
	 */
	mutex_enter(&spa->spa_suspend_lock);
	spa->spa_suspended = ZIO_SUSPEND_NONE;
	cv_broadcast(&spa->spa_suspend_cv);
	pio = spa->spa_suspend_zio_root;
	spa->spa_suspend_zio_root = NULL;
	mutex_exit(&spa->spa_suspend_lock);

	if (pio == NULL)
		return (0);

	zio_reexecute(pio);
	return (zio_wait(pio));
}

void
zio_resume_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_suspend_lock);
	while (spa_suspended(spa))
		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
	mutex_exit(&spa->spa_suspend_lock);
}

/*
 * ==========================================================================
 * Gang blocks.
 *
 * A gang block is a collection of small blocks that looks to the DMU
 * like one large block.  When zio_dva_allocate() cannot find a block
 * of the requested size, due to either severe fragmentation or the pool
 * being nearly full, it calls zio_write_gang_block() to construct the
 * block from smaller fragments.
 *
 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
 * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
 * an indirect block: it's an array of block pointers.  It consumes
 * only one sector and hence is allocatable regardless of fragmentation.
 * The gang header's bps point to its gang members, which hold the data.
 *
 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
 * as the verifier to ensure uniqueness of the SHA256 checksum.
 * Critically, the gang block bp's blk_cksum is the checksum of the data,
 * not the gang header.  This ensures that data block signatures (needed for
 * deduplication) are independent of how the block is physically stored.
 *
 * Gang blocks can be nested: a gang member may itself be a gang block.
 * Thus every gang block is a tree in which root and all interior nodes are
 * gang headers, and the leaves are normal blocks that contain user data.
 * The root of the gang tree is called the gang leader.
 *
 * To perform any operation (read, rewrite, free, claim) on a gang block,
 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
 * in the io_gang_tree field of the original logical i/o by recursively
 * reading the gang leader and all gang headers below it.  This yields
 * an in-core tree containing the contents of every gang header and the
 * bps for every constituent of the gang block.
 *
 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
 * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
 * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
 * of the gang header plus zio_checksum_compute() of the data to update the
 * gang header's blk_cksum as described above.
 *
 * The two-phase assemble/issue model solves the problem of partial failure --
 * what if you'd freed part of a gang block but then couldn't read the
 * gang header for another part?  Assembling the entire gang tree first
 * ensures that all the necessary gang header I/O has succeeded before
 * starting the actual work of free, claim, or write.  Once the gang tree
 * is assembled, free and claim are in-memory operations that cannot fail.
 *
 * In the event that a gang write fails, zio_dva_unallocate() walks the
 * gang tree to immediately free (i.e. insert back into the space map)
 * everything we've allocated.  This ensures that we don't get ENOSPC
 * errors during repeated suspend/resume cycles due to a flaky device.
 *
 * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
 * the gang tree, we won't modify the block, so we can safely defer the free
 * (knowing that the block is still intact).  If we *can* assemble the gang
 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
 * each constituent bp and we can allocate a new block on the next sync pass.
 *
 * In all cases, the gang tree allows complete recovery from partial failure.
 * ==========================================================================
 */

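/*
 * For illustration (sizes hypothetical), a 96K write that cannot be
 * allocated contiguously might gang as:
 *
 *	gang leader bp
 *	  gang header (one 512-byte sector, SPA_GBH_NBLKPTRS = 3 bps)
 *	    +-- bp[0] -> 32K data leaf
 *	    +-- bp[1] -> 32K data leaf
 *	    +-- bp[2] -> 32K data leaf
 *
 * and any bp[i] may itself point at another gang header if even these
 * smaller fragments cannot be allocated.
 */
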
static void
zio_gang_issue_func_done(zio_t *zio)
{
	abd_free(zio->io_abd);
}

static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
    uint64_t offset)
{
	if (gn != NULL)
		return (pio);

	return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
	    BP_GET_PSIZE(bp), zio_gang_issue_func_done,
	    NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
	    &pio->io_bookmark));
}

static zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
    uint64_t offset)
{
	zio_t *zio;

	if (gn != NULL) {
		abd_t *gbh_abd =
		    abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
		    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
		    &pio->io_bookmark);
		/*
		 * As we rewrite each gang header, the pipeline will compute
		 * a new gang block header checksum for it; but no one will
		 * compute a new data checksum, so we do that here.  The one
		 * exception is the gang leader: the pipeline already computed
		 * its data checksum because that stage precedes gang assembly.
		 * (Presently, nothing actually uses interior data checksums;
		 * this is just good hygiene.)
		 */
		if (gn != pio->io_gang_leader->io_gang_tree) {
			abd_t *buf = abd_get_offset(data, offset);

			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
			    buf, BP_GET_PSIZE(bp));

			abd_free(buf);
		}
		/*
		 * If we are here to damage data for testing purposes,
		 * leave the GBH alone so that we can detect the damage.
		 */
		if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
	} else {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    abd_get_offset(data, offset), BP_GET_PSIZE(bp),
		    zio_gang_issue_func_done, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
	}

	return (zio);
}

static zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
    uint64_t offset)
{
	(void) gn, (void) data, (void) offset;

	zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
	    ZIO_GANG_CHILD_FLAGS(pio));
	if (zio == NULL) {
		zio = zio_null(pio, pio->io_spa,
		    NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
	}
	return (zio);
}

static zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
    uint64_t offset)
{
	(void) gn, (void) data, (void) offset;
	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}

static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
	NULL,
	zio_read_gang,
	zio_rewrite_gang,
	zio_free_gang,
	zio_claim_gang,
	NULL
};

static void zio_gang_tree_assemble_done(zio_t *zio);

static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn;

	ASSERT(*gnpp == NULL);

	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
	*gnpp = gn;

	return (gn);
}

static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		ASSERT(gn->gn_child[g] == NULL);

	zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
	kmem_free(gn, sizeof (*gn));
	*gnpp = NULL;
}

static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = *gnpp;

	if (gn == NULL)
		return;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
		zio_gang_tree_free(&gn->gn_child[g]);

	zio_gang_node_free(gnpp);
}

static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
	abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);

	ASSERT(gio->io_gang_leader == gio);
	ASSERT(BP_IS_GANG(bp));

	zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
	    zio_gang_tree_assemble_done, gn, gio->io_priority,
	    ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}

static void
zio_gang_tree_assemble_done(zio_t *zio)
{
	zio_t *gio = zio->io_gang_leader;
	zio_gang_node_t *gn = zio->io_private;
	blkptr_t *bp = zio->io_bp;

	ASSERT(gio == zio_unique_parent(zio));
	ASSERT(zio->io_child_count == 0);

	if (zio->io_error)
		return;

	/* this ABD was created from a linear buf in zio_gang_tree_assemble */
	if (BP_SHOULD_BYTESWAP(bp))
		byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);

	ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
	ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

	abd_free(zio->io_abd);

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
		if (!BP_IS_GANG(gbp))
			continue;
		zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
	}
}

static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
    uint64_t offset)
{
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;

	ASSERT(BP_IS_GANG(bp) == !!gn);
	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);

	/*
	 * If you're a gang header, your data is in gn->gn_gbh.
	 * If you're a gang member, your data is in 'data' and gn == NULL.
	 */
	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);

	if (gn != NULL) {
		ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);

		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
			if (BP_IS_HOLE(gbp))
				continue;
			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
			    offset);
			offset += BP_GET_PSIZE(gbp);
		}
	}

	if (gn == gio->io_gang_tree)
		ASSERT3U(gio->io_size, ==, offset);

	if (zio != pio)
		zio_nowait(zio);
}

static zio_t *
zio_gang_assemble(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	zio->io_gang_leader = zio;

	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);

	return (zio);
}

static zio_t *
zio_gang_issue(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
		return (NULL);
	}

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
		    0);
	else
		zio_gang_tree_free(&zio->io_gang_tree);

	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	return (zio);
}

static void
zio_write_gang_member_ready(zio_t *zio)
{
	zio_t *pio = zio_unique_parent(zio);
	dva_t *cdva = zio->io_bp->blk_dva;
	dva_t *pdva = pio->io_bp->blk_dva;
	uint64_t asize;
	zio_t *gio __maybe_unused = zio->io_gang_leader;

	if (BP_IS_HOLE(zio->io_bp))
		return;

	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));

	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
	ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
	ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
	ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));

	mutex_enter(&pio->io_lock);
	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
		ASSERT(DVA_GET_GANG(&pdva[d]));
		asize = DVA_GET_ASIZE(&pdva[d]);
		asize += DVA_GET_ASIZE(&cdva[d]);
		DVA_SET_ASIZE(&pdva[d], asize);
	}
	mutex_exit(&pio->io_lock);
}

static void
zio_write_gang_done(zio_t *zio)
{
	/*
	 * The io_abd field will be NULL for a zio with no data.  The io_flags
	 * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
	 * check for it here as it is cleared in zio_ready.
	 */
	if (zio->io_abd != NULL)
		abd_free(zio->io_abd);
}

static zio_t *
zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
{
	spa_t *spa = pio->io_spa;
	blkptr_t *bp = pio->io_bp;
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;
	zio_gang_node_t *gn, **gnpp;
	zio_gbh_phys_t *gbh;
	abd_t *gbh_abd;
	uint64_t txg = pio->io_txg;
	uint64_t resid = pio->io_size;
	uint64_t lsize;
	int copies = gio->io_prop.zp_copies;
	int gbh_copies;
	zio_prop_t zp;
	int error;
	boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);

	/*
	 * Encrypted blocks need DVA[2] free, so encrypted gang headers
	 * can't have a third copy.
	 */
	gbh_copies = MIN(copies + 1, spa_max_replication(spa));
	if (gio->io_prop.zp_encrypt && gbh_copies >= SPA_DVAS_PER_BP)
		gbh_copies = SPA_DVAS_PER_BP - 1;

	int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
	if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
		ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
		ASSERT(has_data);

		flags |= METASLAB_ASYNC_ALLOC;
		VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
		    mca_alloc_slots, pio));

		/*
		 * The logical zio has already placed a reservation for
		 * 'copies' allocation slots but gang blocks may require
		 * additional copies.  These additional copies
		 * (i.e. gbh_copies - copies) are guaranteed to succeed
		 * since metaslab_class_throttle_reserve() always allows
		 * additional reservations for gang blocks.
		 */
		VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
		    pio->io_allocator, pio, flags));
	}

	error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
	    bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
	    &pio->io_alloc_list, pio, pio->io_allocator);
	if (error) {
		if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
			ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
			ASSERT(has_data);

			/*
			 * If we failed to allocate the gang block header then
			 * we remove any additional allocation reservations
			 * that we placed here.  The original reservation will
			 * be removed when the logical I/O goes to the ready
			 * stage.
			 */
			metaslab_class_throttle_unreserve(mc,
			    gbh_copies - copies, pio->io_allocator, pio);
		}

		pio->io_error = error;
		return (pio);
	}

	if (pio == gio) {
		gnpp = &gio->io_gang_tree;
	} else {
		gnpp = pio->io_private;
		ASSERT(pio->io_ready == zio_write_gang_member_ready);
	}

	gn = zio_gang_node_alloc(gnpp);
	gbh = gn->gn_gbh;
	memset(gbh, 0, SPA_GANGBLOCKSIZE);
	gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);

	/*
	 * Create the gang header.
	 */
	zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
	    zio_write_gang_done, NULL, pio->io_priority,
	    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

	/*
	 * Create and nowait the gang children.
	 */
	for (int g = 0; resid != 0; resid -= lsize, g++) {
		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
		    SPA_MINBLOCKSIZE);
		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);

		zp.zp_checksum = gio->io_prop.zp_checksum;
		zp.zp_compress = ZIO_COMPRESS_OFF;
		zp.zp_complevel = gio->io_prop.zp_complevel;
		zp.zp_type = DMU_OT_NONE;
		zp.zp_level = 0;
		zp.zp_copies = gio->io_prop.zp_copies;
		zp.zp_dedup = B_FALSE;
		zp.zp_dedup_verify = B_FALSE;
		zp.zp_nopwrite = B_FALSE;
		zp.zp_encrypt = gio->io_prop.zp_encrypt;
		zp.zp_byteorder = gio->io_prop.zp_byteorder;
		memset(zp.zp_salt, 0, ZIO_DATA_SALT_LEN);
		memset(zp.zp_iv, 0, ZIO_DATA_IV_LEN);
		memset(zp.zp_mac, 0, ZIO_DATA_MAC_LEN);

		zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
		    has_data ? abd_get_offset(pio->io_abd, pio->io_size -
		    resid) : NULL, lsize, lsize, &zp,
		    zio_write_gang_member_ready, NULL, NULL,
		    zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);

		if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
			ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
			ASSERT(has_data);

			/*
			 * Gang children won't throttle but we should
			 * account for their work, so reserve an allocation
			 * slot for them here.
			 */
			VERIFY(metaslab_class_throttle_reserve(mc,
			    zp.zp_copies, cio->io_allocator, cio, flags));
		}
		zio_nowait(cio);
	}

	/*
	 * Set pio's pipeline to just wait for zio to finish.
	 */
	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	/*
	 * We didn't allocate this bp, so make sure it doesn't get unmarked.
	 */
	pio->io_flags &= ~ZIO_FLAG_FASTWRITE;

	zio_nowait(zio);

	return (pio);
}

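/*
 * Worked example for the child-size computation above (illustrative):
 * for a 100K (102400-byte) gang write, g=0 takes
 * P2ROUNDUP(102400 / 3, 512) = 34304 bytes, g=1 takes
 * P2ROUNDUP(68096 / 2, 512) = 34304, and g=2 takes the remaining 33792,
 * so the three children cover the block exactly.
 */
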
/*
 * The zio_nop_write stage in the pipeline determines if allocating a
 * new bp is necessary.  The nopwrite feature can handle writes in
 * either syncing or open context (i.e. zil writes) and as a result is
 * mutually exclusive with dedup.
 *
 * By leveraging a cryptographically secure checksum, such as SHA256, we
 * can compare the checksums of the new data and the old to determine if
 * allocating a new block is required.  Note that our requirements for
 * cryptographic strength are fairly weak: there can't be any accidental
 * hash collisions, but we don't need to be secure against intentional
 * (malicious) collisions.  To trigger a nopwrite, you have to be able
 * to write the file to begin with, and triggering an incorrect (hash
 * collision) nopwrite is no worse than simply writing to the file.
 * That said, there are no known attacks against the checksum algorithms
 * used for nopwrite, assuming that the salt and the checksums
 * themselves remain secret.
 */
static zio_t *
zio_nop_write(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	zio_prop_t *zp = &zio->io_prop;

	ASSERT(BP_GET_LEVEL(bp) == 0);
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
	ASSERT(zp->zp_nopwrite);
	ASSERT(!zp->zp_dedup);
	ASSERT(zio->io_bp_override == NULL);
	ASSERT(IO_IS_ALLOCATING(zio));

	/*
	 * Check to see if the original bp and the new bp have matching
	 * characteristics (i.e. same checksum, compression algorithms, etc).
	 * If they don't then just continue with the pipeline which will
	 * allocate a new bp.
	 */
	if (BP_IS_HOLE(bp_orig) ||
	    !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
	    ZCHECKSUM_FLAG_NOPWRITE) ||
	    BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
	    BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
	    BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
	    BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
	    zp->zp_copies != BP_GET_NDVAS(bp_orig))
		return (zio);

	/*
	 * If the checksums match then reset the pipeline so that we
	 * avoid allocating a new bp and issuing any I/O.
	 */
	if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
		ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
		    ZCHECKSUM_FLAG_NOPWRITE);
		ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
		ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
		ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
		ASSERT(memcmp(&bp->blk_prop, &bp_orig->blk_prop,
		    sizeof (uint64_t)) == 0);

		/*
		 * If we're overwriting a block that is currently on an
		 * indirect vdev, then ignore the nopwrite request and
		 * allow a new block to be allocated on a concrete vdev.
		 */
		spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
		vdev_t *tvd = vdev_lookup_top(zio->io_spa,
		    DVA_GET_VDEV(&bp->blk_dva[0]));
		if (tvd->vdev_ops == &vdev_indirect_ops) {
			spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
			return (zio);
		}
		spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);

		*bp = *bp_orig;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		zio->io_flags |= ZIO_FLAG_NOPWRITE;
	}

	return (zio);
}

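/*
 * For illustration: with checksum=sha256 (or another algorithm carrying
 * ZCHECKSUM_FLAG_NOPWRITE) and compression enabled on a dataset,
 * rewriting a block with byte-identical contents produces a matching
 * blk_cksum above, so the write completes without allocating a new
 * block or issuing any device I/O.
 */
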
34dc7c2f
BB
3028/*
3029 * ==========================================================================
428870ff 3030 * Dedup
34dc7c2f
BB
3031 * ==========================================================================
3032 */
428870ff
BB
3033static void
3034zio_ddt_child_read_done(zio_t *zio)
3035{
3036 blkptr_t *bp = zio->io_bp;
3037 ddt_entry_t *dde = zio->io_private;
3038 ddt_phys_t *ddp;
3039 zio_t *pio = zio_unique_parent(zio);
3040
3041 mutex_enter(&pio->io_lock);
3042 ddp = ddt_phys_select(dde, bp);
3043 if (zio->io_error == 0)
3044 ddt_phys_clear(ddp); /* this ddp doesn't need repair */
a6255b7f
DQ
3045
3046 if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
3047 dde->dde_repair_abd = zio->io_abd;
428870ff 3048 else
a6255b7f 3049 abd_free(zio->io_abd);
428870ff
BB
3050 mutex_exit(&pio->io_lock);
3051}
3052
62840030 3053static zio_t *
428870ff
BB
3054zio_ddt_read_start(zio_t *zio)
3055{
3056 blkptr_t *bp = zio->io_bp;
3057
3058 ASSERT(BP_GET_DEDUP(bp));
3059 ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
3060 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
3061
3062 if (zio->io_child_error[ZIO_CHILD_DDT]) {
3063 ddt_t *ddt = ddt_select(zio->io_spa, bp);
3064 ddt_entry_t *dde = ddt_repair_start(ddt, bp);
3065 ddt_phys_t *ddp = dde->dde_phys;
3066 ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
3067 blkptr_t blk;
3068
3069 ASSERT(zio->io_vsd == NULL);
3070 zio->io_vsd = dde;
3071
3072 if (ddp_self == NULL)
62840030 3073 return (zio);
428870ff 3074
1c27024e 3075 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
428870ff
BB
3076 if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
3077 continue;
3078 ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
3079 &blk);
3080 zio_nowait(zio_read(zio, zio->io_spa, &blk,
a6255b7f
DQ
3081 abd_alloc_for_io(zio->io_size, B_TRUE),
3082 zio->io_size, zio_ddt_child_read_done, dde,
3083 zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
3084 ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
428870ff 3085 }
62840030 3086 return (zio);
428870ff
BB
3087 }
3088
3089 zio_nowait(zio_read(zio, zio->io_spa, bp,
a6255b7f 3090 zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
428870ff
BB
3091 ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
3092
62840030 3093 return (zio);
428870ff
BB
3094}
3095
static zio_t *
zio_ddt_read_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
		return (NULL);
	}

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	if (zio->io_child_error[ZIO_CHILD_DDT]) {
		ddt_t *ddt = ddt_select(zio->io_spa, bp);
		ddt_entry_t *dde = zio->io_vsd;
		if (ddt == NULL) {
			ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
			return (zio);
		}
		if (dde == NULL) {
			zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
			return (NULL);
		}
		if (dde->dde_repair_abd != NULL) {
			abd_copy(zio->io_abd, dde->dde_repair_abd,
			    zio->io_size);
			zio->io_child_error[ZIO_CHILD_DDT] = 0;
		}
		ddt_repair_done(ddt, dde);
		zio->io_vsd = NULL;
	}

	ASSERT(zio->io_vsd == NULL);

	return (zio);
}

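/*
 * Returns B_TRUE if this zio's data differs from the existing data with
 * the same checksum, i.e. a true checksum collision.  Used by dedup=verify
 * to decide whether it is safe to share the block.  The comparison is made
 * against an in-flight lead zio when one exists; otherwise the on-disk
 * copy is read back and compared.
 */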
static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{
	spa_t *spa = zio->io_spa;
	boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);

	ASSERT(!(zio->io_bp_override && do_raw));

	/*
	 * Note: we compare the original data, not the transformed data,
	 * because when zio->io_bp is an override bp, we will not have
	 * pushed the I/O transforms.  That's an important optimization
	 * because otherwise we'd compress/encrypt all dmu_sync() data twice.
	 * However, we should never get a raw, override zio so in these
	 * cases we can compare the io_abd directly.  This is useful because
	 * it allows us to do dedup verification even if we don't have access
	 * to the original data (for instance, if the encryption keys aren't
	 * loaded).
	 */

	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
		zio_t *lio = dde->dde_lead_zio[p];

		if (lio != NULL && do_raw) {
			return (lio->io_size != zio->io_size ||
			    abd_cmp(zio->io_abd, lio->io_abd) != 0);
		} else if (lio != NULL) {
			return (lio->io_orig_size != zio->io_orig_size ||
			    abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
		}
	}

	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
		ddt_phys_t *ddp = &dde->dde_phys[p];

		if (ddp->ddp_phys_birth != 0 && do_raw) {
			blkptr_t blk = *zio->io_bp;
			uint64_t psize;
			abd_t *tmpabd;
			int error;

			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
			psize = BP_GET_PSIZE(&blk);

			if (psize != zio->io_size)
				return (B_TRUE);

			ddt_exit(ddt);

			tmpabd = abd_alloc_for_io(psize, B_TRUE);

			error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
			    psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
			    ZIO_FLAG_RAW, &zio->io_bookmark));

			if (error == 0) {
				if (abd_cmp(tmpabd, zio->io_abd) != 0)
					error = SET_ERROR(ENOENT);
			}

			abd_free(tmpabd);
			ddt_enter(ddt);
			return (error != 0);
		} else if (ddp->ddp_phys_birth != 0) {
			arc_buf_t *abuf = NULL;
			arc_flags_t aflags = ARC_FLAG_WAIT;
			blkptr_t blk = *zio->io_bp;
			int error;

			ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);

			if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
				return (B_TRUE);

			ddt_exit(ddt);

			error = arc_read(NULL, spa, &blk,
			    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zio->io_bookmark);

			if (error == 0) {
				if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
				    zio->io_orig_size) != 0)
					error = SET_ERROR(ENOENT);
				arc_buf_destroy(abuf, &abuf);
			}

			ddt_enter(ddt);
			return (error != 0);
		}
	}

	return (B_FALSE);
}

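/*
 * Ready/done callbacks for the child write issued by zio_ddt_write() when
 * a block is written for the first time.  At ready time the new DVAs are
 * copied into the DDT entry and into every waiting parent's bp; at done
 * time the entry's refcount is bumped once per parent, or cleared again
 * if the write failed.
 */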
static void
zio_ddt_child_write_ready(zio_t *zio)
{
	int p = zio->io_prop.zp_copies;
	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];
	zio_t *pio;

	if (zio->io_error)
		return;

	ddt_enter(ddt);

	ASSERT(dde->dde_lead_zio[p] == zio);

	ddt_phys_fill(ddp, zio->io_bp);

	zio_link_t *zl = NULL;
	while ((pio = zio_walk_parents(zio, &zl)) != NULL)
		ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);

	ddt_exit(ddt);
}

static void
zio_ddt_child_write_done(zio_t *zio)
{
	int p = zio->io_prop.zp_copies;
	ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
	ddt_entry_t *dde = zio->io_private;
	ddt_phys_t *ddp = &dde->dde_phys[p];

	ddt_enter(ddt);

	ASSERT(ddp->ddp_refcnt == 0);
	ASSERT(dde->dde_lead_zio[p] == zio);
	dde->dde_lead_zio[p] = NULL;

	if (zio->io_error == 0) {
		zio_link_t *zl = NULL;
		while (zio_walk_parents(zio, &zl) != NULL)
			ddt_phys_addref(ddp);
	} else {
		ddt_phys_clear(ddp);
	}

	ddt_exit(ddt);
}

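/*
 * The DDT write stage.  Looks the block up in the dedup table and either
 * (a) falls back to an ordinary write when dedup-verify detects a
 * collision, (b) points the bp at an existing copy and takes a reference,
 * or (c) issues a child write to create the first copy and becomes the
 * lead zio for this entry.
 */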
static zio_t *
zio_ddt_write(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	uint64_t txg = zio->io_txg;
	zio_prop_t *zp = &zio->io_prop;
	int p = zp->zp_copies;
	zio_t *cio = NULL;
	ddt_t *ddt = ddt_select(spa, bp);
	ddt_entry_t *dde;
	ddt_phys_t *ddp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
	ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
	ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));

	ddt_enter(ddt);
	dde = ddt_lookup(ddt, bp, B_TRUE);
	ddp = &dde->dde_phys[p];

	if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
		/*
		 * If we're using a weak checksum, upgrade to a strong checksum
		 * and try again.  If we're already using a strong checksum,
		 * we can't resolve it, so just convert to an ordinary write.
		 * (And automatically e-mail a paper to Nature?)
		 */
		if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)) {
			zp->zp_checksum = spa_dedup_checksum(spa);
			zio_pop_transforms(zio);
			zio->io_stage = ZIO_STAGE_OPEN;
			BP_ZERO(bp);
		} else {
			zp->zp_dedup = B_FALSE;
			BP_SET_DEDUP(bp, B_FALSE);
		}
		ASSERT(!BP_GET_DEDUP(bp));
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
		ddt_exit(ddt);
		return (zio);
	}

	if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
		if (ddp->ddp_phys_birth != 0)
			ddt_bp_fill(ddp, bp, txg);
		if (dde->dde_lead_zio[p] != NULL)
			zio_add_child(zio, dde->dde_lead_zio[p]);
		else
			ddt_phys_addref(ddp);
	} else if (zio->io_bp_override) {
		ASSERT(bp->blk_birth == txg);
		ASSERT(BP_EQUAL(bp, zio->io_bp_override));
		ddt_phys_fill(ddp, bp);
		ddt_phys_addref(ddp);
	} else {
		cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
		    zio->io_orig_size, zio->io_orig_size, zp,
		    zio_ddt_child_write_ready, NULL, NULL,
		    zio_ddt_child_write_done, dde, zio->io_priority,
		    ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);

		zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
		dde->dde_lead_zio[p] = cio;
	}

	ddt_exit(ddt);

	zio_nowait(cio);

	return (zio);
}

ddt_entry_t *freedde;			/* for debugging */

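/*
 * The DDT free stage: instead of freeing the block outright, drop one
 * reference on its dedup table entry.  (The block itself is presumably
 * reclaimed later, once the last reference goes away and the DDT entry
 * syncs out.)
 */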
static zio_t *
zio_ddt_free(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	ddt_t *ddt = ddt_select(spa, bp);
	ddt_entry_t *dde;
	ddt_phys_t *ddp;

	ASSERT(BP_GET_DEDUP(bp));
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

	ddt_enter(ddt);
	freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
	if (dde) {
		ddp = ddt_phys_select(dde, bp);
		if (ddp)
			ddt_phys_decref(ddp);
	}
	ddt_exit(ddt);

	return (zio);
}

/*
 * ==========================================================================
 * Allocate and free blocks
 * ==========================================================================
 */

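/*
 * Pop the highest-priority throttled zio off this allocator's queue, but
 * only if a reservation can be placed for it; otherwise leave it queued
 * and return NULL.  Called with the allocator's spaa_lock held.
 */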
static zio_t *
zio_io_to_allocate(spa_t *spa, int allocator)
{
	zio_t *zio;

	ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));

	zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
	if (zio == NULL)
		return (NULL);

	ASSERT(IO_IS_ALLOCATING(zio));

	/*
	 * Try to place a reservation for this zio. If we're unable to
	 * reserve then we throttle.
	 */
	ASSERT3U(zio->io_allocator, ==, allocator);
	if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
	    zio->io_prop.zp_copies, allocator, zio, 0)) {
		return (NULL);
	}

	avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
	ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);

	return (zio);
}

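/*
 * The DVA throttle stage.  Writes subject to the allocation throttle are
 * hashed onto one of the spa's allocators and queued there; the zio that
 * is actually returned for execution is whichever queued zio can currently
 * place a reservation, which need not be this one.
 */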
static zio_t *
zio_dva_throttle(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_t *nio;
	metaslab_class_t *mc;

	/* locate an appropriate allocation class */
	mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type,
	    zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk);

	if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
	    !mc->mc_alloc_throttle_enabled ||
	    zio->io_child_type == ZIO_CHILD_GANG ||
	    zio->io_flags & ZIO_FLAG_NODATA) {
		return (zio);
	}

	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
	ASSERT3U(zio->io_queued_timestamp, >, 0);
	ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);

	zbookmark_phys_t *bm = &zio->io_bookmark;
	/*
	 * We want to try to use as many allocators as possible to help improve
	 * performance, but we also want logically adjacent IOs to be physically
	 * adjacent to improve sequential read performance. We chunk each object
	 * into 2^20 block regions, and then hash based on the objset, object,
	 * level, and region to accomplish both of these goals.
	 */
	int allocator = (uint_t)cityhash4(bm->zb_objset, bm->zb_object,
	    bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count;
	zio->io_allocator = allocator;
	zio->io_metaslab_class = mc;
	mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
	avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
	nio = zio_io_to_allocate(spa, allocator);
	mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
	return (nio);
}

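/*
 * Release the next throttled zio (if any) from the given allocator's queue
 * and hand it to a taskq thread to continue down the pipeline.  Called
 * whenever a reservation is returned and allocation slots free up.
 */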
static void
zio_allocate_dispatch(spa_t *spa, int allocator)
{
	zio_t *zio;

	mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
	zio = zio_io_to_allocate(spa, allocator);
	mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
	if (zio == NULL)
		return;

	ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
	ASSERT0(zio->io_error);
	zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
}

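/*
 * The DVA allocation stage: pick DVAs for this block via metaslab_alloc().
 * Allocation class failures fall back to the normal class, then to
 * ganging, before the error is finally surfaced on the zio.
 */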
static zio_t *
zio_dva_allocate(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	metaslab_class_t *mc;
	blkptr_t *bp = zio->io_bp;
	int error;
	int flags = 0;

	if (zio->io_gang_leader == NULL) {
		ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
		zio->io_gang_leader = zio;
	}

	ASSERT(BP_IS_HOLE(bp));
	ASSERT0(BP_GET_NDVAS(bp));
	ASSERT3U(zio->io_prop.zp_copies, >, 0);
	ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	flags |= (zio->io_flags & ZIO_FLAG_FASTWRITE) ? METASLAB_FASTWRITE : 0;
	if (zio->io_flags & ZIO_FLAG_NODATA)
		flags |= METASLAB_DONT_THROTTLE;
	if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
		flags |= METASLAB_GANG_CHILD;
	if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
		flags |= METASLAB_ASYNC_ALLOC;

	/*
	 * if not already chosen, locate an appropriate allocation class
	 */
	mc = zio->io_metaslab_class;
	if (mc == NULL) {
		mc = spa_preferred_class(spa, zio->io_size,
		    zio->io_prop.zp_type, zio->io_prop.zp_level,
		    zio->io_prop.zp_zpl_smallblk);
		zio->io_metaslab_class = mc;
	}

	/*
	 * Try allocating the block in the usual metaslab class.
	 * If that's full, allocate it in the normal class.
	 * If that's full, allocate as a gang block,
	 * and if all are full, the allocation fails (which shouldn't happen).
	 *
	 * Note that we do not fall back on embedded slog (ZIL) space, to
	 * preserve unfragmented slog space, which is critical for decent
	 * sync write performance.  If a log allocation fails, we will fall
	 * back to spa_sync() which is abysmal for performance.
	 */
	error = metaslab_alloc(spa, mc, zio->io_size, bp,
	    zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
	    &zio->io_alloc_list, zio, zio->io_allocator);

	/*
	 * Fallback to normal class when an alloc class is full
	 */
	if (error == ENOSPC && mc != spa_normal_class(spa)) {
		/*
		 * If throttling, transfer reservation over to normal class.
		 * The io_allocator slot can remain the same even though we
		 * are switching classes.
		 */
		if (mc->mc_alloc_throttle_enabled &&
		    (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
			metaslab_class_throttle_unreserve(mc,
			    zio->io_prop.zp_copies, zio->io_allocator, zio);
			zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;

			VERIFY(metaslab_class_throttle_reserve(
			    spa_normal_class(spa),
			    zio->io_prop.zp_copies, zio->io_allocator, zio,
			    flags | METASLAB_MUST_RESERVE));
		}
		zio->io_metaslab_class = mc = spa_normal_class(spa);
		if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
			zfs_dbgmsg("%s: metaslab allocation failure, "
			    "trying normal class: zio %px, size %llu, error %d",
			    spa_name(spa), zio, (u_longlong_t)zio->io_size,
			    error);
		}

		error = metaslab_alloc(spa, mc, zio->io_size, bp,
		    zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
		    &zio->io_alloc_list, zio, zio->io_allocator);
	}

	if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
		if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
			zfs_dbgmsg("%s: metaslab allocation failure, "
			    "trying ganging: zio %px, size %llu, error %d",
			    spa_name(spa), zio, (u_longlong_t)zio->io_size,
			    error);
		}
		return (zio_write_gang_block(zio, mc));
	}
	if (error != 0) {
		if (error != ENOSPC ||
		    (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
			zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
			    "size %llu, error %d",
			    spa_name(spa), zio, (u_longlong_t)zio->io_size,
			    error);
		}
		zio->io_error = error;
	}

	return (zio);
}

static zio_t *
zio_dva_free(zio_t *zio)
{
	metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);

	return (zio);
}

static zio_t *
zio_dva_claim(zio_t *zio)
{
	int error;

	error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
	if (error)
		zio->io_error = error;

	return (zio);
}

/*
 * Undo an allocation.  This is used by zio_done() when an I/O fails
 * and we want to give back the block we just allocated.
 * This handles both normal blocks and gang blocks.
 */
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
	ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
	ASSERT(zio->io_bp_override == NULL);

	if (!BP_IS_HOLE(bp))
		metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);

	if (gn != NULL) {
		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			zio_dva_unallocate(zio, gn->gn_child[g],
			    &gn->gn_gbh->zg_blkptr[g]);
		}
	}
}

/*
 * Try to allocate an intent log block.  Return 0 on success, errno on failure.
 */
int
zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
    uint64_t size, boolean_t *slog)
{
	int error = 1;
	zio_alloc_list_t io_alloc_list;

	ASSERT(txg > spa_syncing_txg(spa));

	metaslab_trace_init(&io_alloc_list);

	/*
	 * Block pointer fields are useful to metaslabs for stats and debugging.
	 * Fill in the obvious ones before calling into metaslab_alloc().
	 */
	BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
	BP_SET_PSIZE(new_bp, size);
	BP_SET_LEVEL(new_bp, 0);

	/*
	 * When allocating a zil block, we don't have information about
	 * the final destination of the block except the objset it's part
	 * of, so we just hash the objset ID to pick the allocator to get
	 * some parallelism.
	 */
	int flags = METASLAB_FASTWRITE | METASLAB_ZIL;
	int allocator = (uint_t)cityhash4(0, 0, 0,
	    os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
	error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
	    txg, NULL, flags, &io_alloc_list, NULL, allocator);
	*slog = (error == 0);
	if (error != 0) {
		error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
		    new_bp, 1, txg, NULL, flags,
		    &io_alloc_list, NULL, allocator);
	}
	if (error != 0) {
		error = metaslab_alloc(spa, spa_normal_class(spa), size,
		    new_bp, 1, txg, NULL, flags,
		    &io_alloc_list, NULL, allocator);
	}
	metaslab_trace_fini(&io_alloc_list);

	if (error == 0) {
		BP_SET_LSIZE(new_bp, size);
		BP_SET_PSIZE(new_bp, size);
		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
		BP_SET_CHECKSUM(new_bp,
		    spa_version(spa) >= SPA_VERSION_SLIM_ZIL
		    ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
		BP_SET_LEVEL(new_bp, 0);
		BP_SET_DEDUP(new_bp, 0);
		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);

		/*
		 * encrypted blocks will require an IV and salt. We generate
		 * these now since we will not be rewriting the bp at
		 * rewrite time.
		 */
		if (os->os_encrypted) {
			uint8_t iv[ZIO_DATA_IV_LEN];
			uint8_t salt[ZIO_DATA_SALT_LEN];

			BP_SET_CRYPT(new_bp, B_TRUE);
			VERIFY0(spa_crypt_get_salt(spa,
			    dmu_objset_id(os), salt));
			VERIFY0(zio_crypt_generate_iv(iv));

			zio_crypt_encode_params_bp(new_bp, salt, iv);
		}
	} else {
		zfs_dbgmsg("%s: zil block allocation failure: "
		    "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
		    error);
	}

	return (error);
}

/*
 * ==========================================================================
 * Read and write to physical devices
 * ==========================================================================
 */

/*
 * Issue an I/O to the underlying vdev. Typically the issue pipeline
 * stops after this stage and will resume upon I/O completion.
 * However, there are instances where the vdev layer may need to
 * continue the pipeline when an I/O was not issued. Since the I/O
 * that was sent to the vdev layer might be different than the one
 * currently active in the pipeline (see vdev_queue_io()), we explicitly
 * force the underlying vdev layers to call either zio_execute() or
 * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
 */
static zio_t *
zio_vdev_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	uint64_t align;
	spa_t *spa = zio->io_spa;

	zio->io_delay = 0;

	ASSERT(zio->io_error == 0);
	ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);

	if (vd == NULL) {
		if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
			spa_config_enter(spa, SCL_ZIO, zio, RW_READER);

		/*
		 * The mirror_ops handle multiple DVAs in a single BP.
		 */
		vdev_mirror_ops.vdev_op_io_start(zio);
		return (NULL);
	}

	ASSERT3P(zio->io_logical, !=, zio);
	if (zio->io_type == ZIO_TYPE_WRITE) {
		ASSERT(spa->spa_trust_config);

		/*
		 * Note: the code can handle other kinds of writes,
		 * but we don't expect them.
		 */
		if (zio->io_vd->vdev_noalloc) {
			ASSERT(zio->io_flags &
			    (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
			    ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
		}
	}

	align = 1ULL << vd->vdev_top->vdev_ashift;

	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
	    P2PHASE(zio->io_size, align) != 0) {
		/* Transform logical writes to be a full physical block size. */
		uint64_t asize = P2ROUNDUP(zio->io_size, align);
		abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
		ASSERT(vd == vd->vdev_top);
		if (zio->io_type == ZIO_TYPE_WRITE) {
			abd_copy(abuf, zio->io_abd, zio->io_size);
			abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
		}
		zio_push_transform(zio, abuf, asize, asize, zio_subblock);
	}

	/*
	 * If this is not a physical io, make sure that it is properly aligned
	 * before proceeding.
	 */
	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
		ASSERT0(P2PHASE(zio->io_offset, align));
		ASSERT0(P2PHASE(zio->io_size, align));
	} else {
		/*
		 * For physical writes, we allow 512b aligned writes and assume
		 * the device will perform a read-modify-write as necessary.
		 */
		ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
		ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
	}

	VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));

	/*
	 * If this is a repair I/O, and there's no self-healing involved --
	 * that is, we're just resilvering what we expect to resilver --
	 * then don't do the I/O unless zio's txg is actually in vd's DTL.
	 * This prevents spurious resilvering.
	 *
	 * There are a few ways that we can end up creating these spurious
	 * resilver i/os:
	 *
	 * 1. A resilver i/o will be issued if any DVA in the BP has a
	 * dirty DTL. The mirror code will issue resilver writes to
	 * each DVA, including the one(s) that are not on vdevs with dirty
	 * DTLs.
	 *
	 * 2. With nested replication, which happens when we have a
	 * "replacing" or "spare" vdev that's a child of a mirror or raidz.
	 * For example, given mirror(replacing(A+B), C), it's likely that
	 * only A is out of date (it's the new device). In this case, we'll
	 * read from C, then use the data to resilver A+B -- but we don't
	 * actually want to resilver B, just A. The top-level mirror has no
	 * way to know this, so instead we just discard unnecessary repairs
	 * as we work our way down the vdev tree.
	 *
	 * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
	 * The same logic applies to any form of nested replication: ditto
	 * + mirror, RAID-Z + replacing, etc.
	 *
	 * However, indirect vdevs point off to other vdevs which may have
	 * DTL's, so we never bypass them. The child i/os on concrete vdevs
	 * will be properly bypassed instead.
	 *
	 * Leaf DTL_PARTIAL can be empty when a legitimate write comes from
	 * a dRAID spare vdev. For example, when a dRAID spare is first
	 * used, its spare blocks need to be written to but the leaf vdev's
	 * of such blocks can have empty DTL_PARTIAL.
	 *
	 * There seemed no clean way to allow such writes while bypassing
	 * spurious ones. At this point, just avoid all bypassing for dRAID
	 * for correctness.
	 */
	if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
	    !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
	    zio->io_txg != 0 &&	/* not a delegated i/o */
	    vd->vdev_ops != &vdev_indirect_ops &&
	    vd->vdev_top->vdev_ops != &vdev_draid_ops &&
	    !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		zio_vdev_io_bypass(zio);
		return (zio);
	}

	/*
	 * Select the next best leaf I/O to process.  Distributed spares are
	 * excluded since they dispatch the I/O directly to a leaf vdev after
	 * applying the dRAID mapping.
	 */
	if (vd->vdev_ops->vdev_op_leaf &&
	    vd->vdev_ops != &vdev_draid_spare_ops &&
	    (zio->io_type == ZIO_TYPE_READ ||
	    zio->io_type == ZIO_TYPE_WRITE ||
	    zio->io_type == ZIO_TYPE_TRIM)) {

		if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
			return (zio);

		if ((zio = vdev_queue_io(zio)) == NULL)
			return (NULL);

		if (!vdev_accessible(vd, zio)) {
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return (NULL);
		}
		zio->io_delay = gethrtime();
	}

	vd->vdev_ops->vdev_op_io_start(zio);
	return (NULL);
}

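/*
 * Completion stage for vdev I/O: credits the vdev queue, applies any
 * injected errors, converts errors on an inaccessible leaf to ENXIO, and
 * kicks off a vdev probe when a leaf fails unexpectedly.
 */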
static zio_t *
zio_vdev_io_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
	boolean_t unexpected_error = B_FALSE;

	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
		return (NULL);
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ ||
	    zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM);

	if (zio->io_delay)
		zio->io_delay = gethrtime() - zio->io_delay;

	if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
	    vd->vdev_ops != &vdev_draid_spare_ops) {
		vdev_queue_io_done(zio);

		if (zio->io_type == ZIO_TYPE_WRITE)
			vdev_cache_write(zio);

		if (zio_injection_enabled && zio->io_error == 0)
			zio->io_error = zio_handle_device_injections(vd, zio,
			    EIO, EILSEQ);

		if (zio_injection_enabled && zio->io_error == 0)
			zio->io_error = zio_handle_label_injection(zio, EIO);

		if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) {
			if (!vdev_accessible(vd, zio)) {
				zio->io_error = SET_ERROR(ENXIO);
			} else {
				unexpected_error = B_TRUE;
			}
		}
	}

	ops->vdev_op_io_done(zio);

	if (unexpected_error)
		VERIFY(vdev_probe(vd, zio) == NULL);

	return (zio);
}

/*
 * This function is used to change the priority of an existing zio that is
 * currently in-flight. This is used by the arc to upgrade priority in the
 * event that a demand read is made for a block that is currently queued
 * as a scrub or async read IO. Otherwise, the high priority read request
 * would end up having to wait for the lower priority IO.
 */
void
zio_change_priority(zio_t *pio, zio_priority_t priority)
{
	zio_t *cio, *cio_next;
	zio_link_t *zl = NULL;

	ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);

	if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
		vdev_queue_change_io_priority(pio, priority);
	} else {
		pio->io_priority = priority;
	}

	mutex_enter(&pio->io_lock);
	for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio, &zl);
		zio_change_priority(cio, priority);
	}
	mutex_exit(&pio->io_lock);
}

/*
 * For non-raidz ZIOs, we can just copy aside the bad data read from the
 * disk, and use that to finish the checksum ereport later.
 */
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
    const abd_t *good_buf)
{
	/* no processing needed */
	zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}

void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
{
	void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);

	abd_copy(abd, zio->io_abd, zio->io_size);

	zcr->zcr_cbinfo = zio->io_size;
	zcr->zcr_cbdata = abd;
	zcr->zcr_finish = zio_vsd_default_cksum_finish;
	zcr->zcr_free = zio_abd_free;
}

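/*
 * Final vdev stage: examine the outcome of the I/O.  Failed top-level
 * (vd == NULL) I/Os that are allowed to retry are requeued at the head of
 * the issue queue; errors on inaccessible leaves become ENXIO; and
 * persistent conditions (interior vdev write failure, unsupported cache
 * flush) are recorded on the vdev for future decisions.
 */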
static zio_t *
zio_vdev_io_assess(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
		return (NULL);
	}

	if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
		spa_config_exit(zio->io_spa, SCL_ZIO, zio);

	if (zio->io_vsd != NULL) {
		zio->io_vsd_ops->vsd_free(zio);
		zio->io_vsd = NULL;
	}

	if (zio_injection_enabled && zio->io_error == 0)
		zio->io_error = zio_handle_fault_injection(zio, EIO);

	/*
	 * If the I/O failed, determine whether we should attempt to retry it.
	 *
	 * On retry, we cut in line in the issue queue, since we don't want
	 * compression/checksumming/etc. work to prevent our (cheap) IO reissue.
	 */
	if (zio->io_error && vd == NULL &&
	    !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE));	/* not a leaf */
		ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS));	/* not a leaf */
		zio->io_error = 0;
		zio->io_flags |= ZIO_FLAG_IO_RETRY |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
		zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
		zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
		    zio_requeue_io_start_cut_in_line);
		return (NULL);
	}

	/*
	 * If we got an error on a leaf device, convert it to ENXIO
	 * if the device is not accessible at all.
	 */
	if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
	    !vdev_accessible(vd, zio))
		zio->io_error = SET_ERROR(ENXIO);

	/*
	 * If we can't write to an interior vdev (mirror or RAID-Z),
	 * set vdev_cant_write so that we stop trying to allocate from it.
	 */
	if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
	    vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
		vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
		    "cant_write=TRUE due to write failure with ENXIO",
		    zio);
		vd->vdev_cant_write = B_TRUE;
	}

	/*
	 * If a cache flush returns ENOTSUP or ENOTTY, we know that no future
	 * attempts will ever succeed. In this case we set a persistent
	 * boolean flag so that we don't bother with it in the future.
	 */
	if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
	    zio->io_type == ZIO_TYPE_IOCTL &&
	    zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL)
		vd->vdev_nowritecache = B_TRUE;

	if (zio->io_error)
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
	    zio->io_physdone != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
		ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
		zio->io_physdone(zio->io_logical);
	}

	return (zio);
}

void
zio_vdev_io_reissue(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_stage >>= 1;
}

void
zio_vdev_io_redone(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);

	zio->io_stage >>= 1;
}

void
zio_vdev_io_bypass(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
}

/*
 * ==========================================================================
 * Encrypt and store encryption parameters
 * ==========================================================================
 */


/*
 * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
 * managing the storage of encryption parameters and passing them to the
 * lower-level encryption functions.
 */
static zio_t *
zio_encrypt(zio_t *zio)
{
	zio_prop_t *zp = &zio->io_prop;
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	uint64_t psize = BP_GET_PSIZE(bp);
	uint64_t dsobj = zio->io_bookmark.zb_objset;
	dmu_object_type_t ot = BP_GET_TYPE(bp);
	void *enc_buf = NULL;
	abd_t *eabd = NULL;
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];
	boolean_t no_crypt = B_FALSE;

	/* the root zio already encrypted the data */
	if (zio->io_child_type == ZIO_CHILD_GANG)
		return (zio);

	/* only ZIL blocks are re-encrypted on rewrite */
	if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
		return (zio);

	if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
		BP_SET_CRYPT(bp, B_FALSE);
		return (zio);
	}

	/* if we are doing raw encryption set the provided encryption params */
	if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
		ASSERT0(BP_GET_LEVEL(bp));
		BP_SET_CRYPT(bp, B_TRUE);
		BP_SET_BYTEORDER(bp, zp->zp_byteorder);
		if (ot != DMU_OT_OBJSET)
			zio_crypt_encode_mac_bp(bp, zp->zp_mac);

		/* dnode blocks must be written out in the provided byteorder */
		if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
		    ot == DMU_OT_DNODE) {
			void *bswap_buf = zio_buf_alloc(psize);
			abd_t *babd = abd_get_from_buf(bswap_buf, psize);

			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
			abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
			dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
			    psize);

			abd_take_ownership_of_buf(babd, B_TRUE);
			zio_push_transform(zio, babd, psize, psize, NULL);
		}

		if (DMU_OT_IS_ENCRYPTED(ot))
			zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
		return (zio);
	}

	/* indirect blocks only maintain a cksum of the lower level MACs */
	if (BP_GET_LEVEL(bp) > 0) {
		BP_SET_CRYPT(bp, B_TRUE);
		VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
		    zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
		    mac));
		zio_crypt_encode_mac_bp(bp, mac);
		return (zio);
	}

	/*
	 * Objset blocks are a special case since they have 2 256-bit MACs
	 * embedded within them.
	 */
	if (ot == DMU_OT_OBJSET) {
		ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
		ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
		BP_SET_CRYPT(bp, B_TRUE);
		VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
		    zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
		return (zio);
	}

	/* unencrypted object types are only authenticated with a MAC */
	if (!DMU_OT_IS_ENCRYPTED(ot)) {
		BP_SET_CRYPT(bp, B_TRUE);
		VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
		    zio->io_abd, psize, mac));
		zio_crypt_encode_mac_bp(bp, mac);
		return (zio);
	}

	/*
	 * Later passes of sync-to-convergence may decide to rewrite data
	 * in place to avoid more disk reallocations. This presents a problem
	 * for encryption because this constitutes rewriting the new data with
	 * the same encryption key and IV. However, this only applies to blocks
	 * in the MOS (particularly the spacemaps) and we do not encrypt the
	 * MOS. We assert that the zio is allocating or an intent log write
	 * to enforce this.
	 */
	ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
	ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
	ASSERT3U(psize, !=, 0);

	enc_buf = zio_buf_alloc(psize);
	eabd = abd_get_from_buf(enc_buf, psize);
	abd_take_ownership_of_buf(eabd, B_TRUE);

	/*
	 * For an explanation of what encryption parameters are stored
	 * where, see the block comment in zio_crypt.c.
	 */
	if (ot == DMU_OT_INTENT_LOG) {
		zio_crypt_decode_params_bp(bp, salt, iv);
	} else {
		BP_SET_CRYPT(bp, B_TRUE);
	}

	/* Perform the encryption. This should not fail */
	VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
	    BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
	    salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));

	/* encode encryption metadata into the bp */
	if (ot == DMU_OT_INTENT_LOG) {
		/*
		 * ZIL blocks store the MAC in the embedded checksum, so the
		 * transform must always be applied.
		 */
		zio_crypt_encode_mac_zil(enc_buf, mac);
		zio_push_transform(zio, eabd, psize, psize, NULL);
	} else {
		BP_SET_CRYPT(bp, B_TRUE);
		zio_crypt_encode_params_bp(bp, salt, iv);
		zio_crypt_encode_mac_bp(bp, mac);

		if (no_crypt) {
			ASSERT3U(ot, ==, DMU_OT_DNODE);
			abd_free(eabd);
		} else {
			zio_push_transform(zio, eabd, psize, psize, NULL);
		}
	}

	return (zio);
}

/*
 * ==========================================================================
 * Generate and verify checksums
 * ==========================================================================
 */
static zio_t *
zio_checksum_generate(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	enum zio_checksum checksum;

	if (bp == NULL) {
		/*
		 * This is zio_write_phys().
		 * We're either generating a label checksum, or none at all.
		 */
		checksum = zio->io_prop.zp_checksum;

		if (checksum == ZIO_CHECKSUM_OFF)
			return (zio);

		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
	} else {
		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
			ASSERT(!IO_IS_ALLOCATING(zio));
			checksum = ZIO_CHECKSUM_GANG_HEADER;
		} else {
			checksum = BP_GET_CHECKSUM(bp);
		}
	}

	zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);

	return (zio);
}

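/*
 * Verify the checksum of data arriving from a vdev.  On a mismatch the
 * error is recorded on the zio; for a non-speculative ECKSUM a checksum
 * ereport is started and the vdev's checksum error count is bumped.
 */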
static zio_t *
zio_checksum_verify(zio_t *zio)
{
	zio_bad_cksum_t info;
	blkptr_t *bp = zio->io_bp;
	int error;

	ASSERT(zio->io_vd != NULL);

	if (bp == NULL) {
		/*
		 * This is zio_read_phys().
		 * We're either verifying a label checksum, or nothing at all.
		 */
		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
			return (zio);

		ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
	}

	if ((error = zio_checksum_error(zio, &info)) != 0) {
		zio->io_error = error;
		if (error == ECKSUM &&
		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
			(void) zfs_ereport_start_checksum(zio->io_spa,
			    zio->io_vd, &zio->io_bookmark, zio,
			    zio->io_offset, zio->io_size, &info);
			mutex_enter(&zio->io_vd->vdev_stat_lock);
			zio->io_vd->vdev_stat.vs_checksum_errors++;
			mutex_exit(&zio->io_vd->vdev_stat_lock);
		}
	}

	return (zio);
}

/*
 * Called by RAID-Z to ensure we don't compute the checksum twice.
 */
void
zio_checksum_verified(zio_t *zio)
{
	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}

/*
 * ==========================================================================
 * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
 * An error of 0 indicates success.  ENXIO indicates whole-device failure,
 * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
 * indicate errors that are specific to one I/O, and most likely permanent.
 * Any other error is presumed to be worse because we weren't expecting it.
 * ==========================================================================
 */
int
zio_worst_error(int e1, int e2)
{
	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
	int r1, r2;

	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
		if (e1 == zio_error_rank[r1])
			break;

	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
		if (e2 == zio_error_rank[r2])
			break;

	return (r1 > r2 ? e1 : e2);
}

/*
 * ==========================================================================
 * I/O completion
 * ==========================================================================
 */
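/*
 * The ready stage: runs once all gang and DDT children are ready.  Invokes
 * the io_ready callback, releases any allocation reservation held by a
 * failed zio, and notifies waiting parents that this zio has reached the
 * ready state.
 */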
static zio_t *
zio_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	zio_t *pio, *pio_next;
	zio_link_t *zl = NULL;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT,
	    ZIO_WAIT_READY)) {
		return (NULL);
	}

	if (zio->io_ready) {
		ASSERT(IO_IS_ALLOCATING(zio));
		ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
		    (zio->io_flags & ZIO_FLAG_NOPWRITE));
		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);

		zio->io_ready(zio);
	}

	if (bp != NULL && bp != &zio->io_bp_copy)
		zio->io_bp_copy = *bp;

	if (zio->io_error != 0) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
			ASSERT(IO_IS_ALLOCATING(zio));
			ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
			ASSERT(zio->io_metaslab_class != NULL);

			/*
			 * We were unable to allocate anything, unreserve and
			 * issue the next I/O to allocate.
			 */
			metaslab_class_throttle_unreserve(
			    zio->io_metaslab_class, zio->io_prop.zp_copies,
			    zio->io_allocator, zio);
			zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
		}
	}

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_READY] = 1;
	pio = zio_walk_parents(zio, &zl);
	mutex_exit(&zio->io_lock);

	/*
	 * As we notify zio's parents, new parents could be added.
	 * New parents go to the head of zio's io_parent_list, however,
	 * so we will (correctly) not notify them. The remainder of zio's
	 * io_parent_list, from 'pio_next' onward, cannot change because
	 * all parents must wait for us to be done before they can be done.
	 */
	for (; pio != NULL; pio = pio_next) {
		pio_next = zio_walk_parents(zio, &zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
	}

	if (zio->io_flags & ZIO_FLAG_NODATA) {
		if (BP_IS_GANG(bp)) {
			zio->io_flags &= ~ZIO_FLAG_NODATA;
		} else {
			ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		}
	}

	if (zio_injection_enabled &&
	    zio->io_spa->spa_syncing_txg == zio->io_txg)
		zio_handle_ignored_writes(zio);

	return (zio);
}

/*
 * Update the allocation throttle accounting.
 */
static void
zio_dva_throttle_done(zio_t *zio)
{
	zio_t *lio __maybe_unused = zio->io_logical;
	zio_t *pio = zio_unique_parent(zio);
	vdev_t *vd = zio->io_vd;
	int flags = METASLAB_ASYNC_ALLOC;

	ASSERT3P(zio->io_bp, !=, NULL);
	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
	ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
	ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
	ASSERT(vd != NULL);
	ASSERT3P(vd, ==, vd->vdev_top);
	ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
	ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
	ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
	ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));

	/*
	 * Parents of gang children can have two flavors -- ones that
	 * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
	 * and ones that allocated the constituent blocks. The allocation
	 * throttle needs to know the allocating parent zio so we must find
	 * it here.
	 */
	if (pio->io_child_type == ZIO_CHILD_GANG) {
		/*
		 * If our parent is a rewrite gang child then our grandparent
		 * would have been the one that performed the allocation.
		 */
		if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
			pio = zio_unique_parent(pio);
		flags |= METASLAB_GANG_CHILD;
	}

	ASSERT(IO_IS_ALLOCATING(pio));
	ASSERT3P(zio, !=, zio->io_logical);
	ASSERT(zio->io_logical != NULL);
	ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
	ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
	ASSERT(zio->io_metaslab_class != NULL);

	mutex_enter(&pio->io_lock);
	metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
	    pio->io_allocator, B_TRUE);
	mutex_exit(&pio->io_lock);

	metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
	    pio->io_allocator, pio);

	/*
	 * Call into the pipeline to see if there is more work that
	 * needs to be done. If there is work to be done it will be
	 * dispatched to another taskq thread.
	 */
	zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
}

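/*
 * The final stage of the pipeline.  Waits for all children, settles the
 * allocation throttle accounting, sanity-checks the block pointer, updates
 * vdev stats, posts any error/delay ereports, and decides whether the I/O
 * should be reexecuted or the pool suspended before notifying parents.
 */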
62840030 4498static zio_t *
b128c09f 4499zio_done(zio_t *zio)
34dc7c2f 4500{
3dfb57a3
DB
4501 /*
4502 * Always attempt to keep stack usage minimal here since
d611989f 4503 * we can be called recursively up to 19 levels deep.
3dfb57a3 4504 */
84c07ada 4505 const uint64_t psize = zio->io_size;
d164b209 4506 zio_t *pio, *pio_next;
3dfb57a3 4507 zio_link_t *zl = NULL;
34dc7c2f 4508
b128c09f 4509 /*
9babb374 4510 * If our children haven't all completed,
b128c09f
BB
4511 * wait for them and then repeat this pipeline stage.
4512 */
ddc751d5 4513 if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
62840030 4514 return (NULL);
ddc751d5 4515 }
34dc7c2f 4516
3dfb57a3
DB
4517 /*
4518 * If the allocation throttle is enabled, then update the accounting.
4519 * We only track child I/Os that are part of an allocating async
4520 * write. We must do this since the allocation is performed
4521 * by the logical I/O but the actual write is done by child I/Os.
4522 */
4523 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
4524 zio->io_child_type == ZIO_CHILD_VDEV) {
cc99f275
DB
4525 ASSERT(zio->io_metaslab_class != NULL);
4526 ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
3dfb57a3
DB
4527 zio_dva_throttle_done(zio);
4528 }
4529
4530 /*
4531 * If the allocation throttle is enabled, verify that
4532 * we have decremented the refcounts for every I/O that was throttled.
4533 */
4534 if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
4535 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
4536 ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
4537 ASSERT(zio->io_bp != NULL);
cc99f275 4538
492f64e9
PD
4539 metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
4540 zio->io_allocator);
f8020c93
AM
4541 VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
4542 mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
3dfb57a3
DB
4543 }
4544
4545
1c27024e
DB
4546 for (int c = 0; c < ZIO_CHILD_TYPES; c++)
4547 for (int w = 0; w < ZIO_WAIT_TYPES; w++)
b128c09f
BB
4548 ASSERT(zio->io_children[c][w] == 0);
4549
9b67f605 4550 if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
c776b317
BB
4551 ASSERT(zio->io_bp->blk_pad[0] == 0);
4552 ASSERT(zio->io_bp->blk_pad[1] == 0);
861166b0 4553 ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
d1d7e268 4554 sizeof (blkptr_t)) == 0 ||
c776b317
BB
4555 (zio->io_bp == zio_unique_parent(zio)->io_bp));
4556 if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
428870ff 4557 zio->io_bp_override == NULL &&
b128c09f 4558 !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
d1d7e268
MK
4559 ASSERT3U(zio->io_prop.zp_copies, <=,
4560 BP_GET_NDVAS(zio->io_bp));
c776b317 4561 ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
d1d7e268
MK
4562 (BP_COUNT_GANG(zio->io_bp) ==
4563 BP_GET_NDVAS(zio->io_bp)));
b128c09f 4564 }
03c6040b
GW
4565 if (zio->io_flags & ZIO_FLAG_NOPWRITE)
4566 VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
b128c09f
BB
4567 }
4568
4569 /*
428870ff 4570 * If there were child vdev/gang/ddt errors, they apply to us now.
b128c09f
BB
4571 */
4572 zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
4573 zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
428870ff
BB
4574 zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
4575
4576 /*
4577 * If the I/O on the transformed data was successful, generate any
4578 * checksum reports now while we still have the transformed data.
4579 */
4580 if (zio->io_error == 0) {
4581 while (zio->io_cksum_report != NULL) {
4582 zio_cksum_report_t *zcr = zio->io_cksum_report;
4583 uint64_t align = zcr->zcr_align;
a6255b7f 4584 uint64_t asize = P2ROUNDUP(psize, align);
a6255b7f
DQ
4585 abd_t *adata = zio->io_abd;
4586
f2286383 4587 if (adata != NULL && asize != psize) {
84c07ada 4588 adata = abd_alloc(asize, B_TRUE);
a6255b7f
DQ
4589 abd_copy(adata, zio->io_abd, psize);
4590 abd_zero_off(adata, psize, asize - psize);
428870ff
BB
4591 }
4592
4593 zio->io_cksum_report = zcr->zcr_next;
4594 zcr->zcr_next = NULL;
84c07ada 4595 zcr->zcr_finish(zcr, adata);
428870ff
BB
4596 zfs_ereport_free_checksum(zcr);
4597
f2286383 4598 if (adata != NULL && asize != psize)
a6255b7f 4599 abd_free(adata);
428870ff
BB
4600 }
4601 }
b128c09f
BB
4602
4603 zio_pop_transforms(zio); /* note: may set zio->io_error */
4604
a6255b7f 4605 vdev_stat_update(zio, psize);
b128c09f 4606
a69052be 4607 /*
cc92e9d0 4608 * If this I/O is attached to a particular vdev is slow, exceeding
72f53c56
MJ
4609 * 30 seconds to complete, post an error described the I/O delay.
4610 * We ignore these errors if the device is currently unavailable.
a69052be 4611 */
ad796b8a
TH
4612 if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
4613 if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
4614 /*
4615 * We want to only increment our slow IO counters if
4616 * the IO is valid (i.e. not if the drive is removed).
4617 *
4618 * zfs_ereport_post() will also do these checks, but
4619 * it can also ratelimit and have other failures, so we
4620 * need to increment the slow_io counters independent
4621 * of it.
4622 */
4623 if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
4624 zio->io_spa, zio->io_vd, zio)) {
4625 mutex_enter(&zio->io_vd->vdev_stat_lock);
4626 zio->io_vd->vdev_stat.vs_slow_ios++;
4627 mutex_exit(&zio->io_vd->vdev_stat_lock);
4628
1144586b 4629 (void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
ad796b8a 4630 zio->io_spa, zio->io_vd, &zio->io_bookmark,
4f072827 4631 zio, 0);
ad796b8a
TH
4632 }
4633 }
72f53c56 4634 }
a69052be 4635
b128c09f
BB
4636 if (zio->io_error) {
4637 /*
4638 * If this I/O is attached to a particular vdev,
4639 * generate an error message describing the I/O failure
4640 * at the block level. We ignore these errors if the
4641 * device is currently unavailable.
4642 */
c776b317 4643 if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
2bbec1c9 4644 !vdev_is_dead(zio->io_vd)) {
4f072827
DB
4645 int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
4646 zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
4647 if (ret != EALREADY) {
4648 mutex_enter(&zio->io_vd->vdev_stat_lock);
4649 if (zio->io_type == ZIO_TYPE_READ)
4650 zio->io_vd->vdev_stat.vs_read_errors++;
4651 else if (zio->io_type == ZIO_TYPE_WRITE)
4652 zio->io_vd->vdev_stat.vs_write_errors++;
4653 mutex_exit(&zio->io_vd->vdev_stat_lock);
2bbec1c9 4654 }
2bbec1c9 4655 }
34dc7c2f 4656
428870ff
BB
4657 if ((zio->io_error == EIO || !(zio->io_flags &
4658 (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
c776b317 4659 zio == zio->io_logical) {
b128c09f
BB
4660 /*
4661 * For logical I/O requests, tell the SPA to log the
4662 * error and generate a logical data ereport.
4663 */
b5256303 4664 spa_log_error(zio->io_spa, &zio->io_bookmark);
1144586b 4665 (void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
4f072827 4666 zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
b128c09f
BB
4667 }
4668 }
34dc7c2f 4669
c776b317 4670 if (zio->io_error && zio == zio->io_logical) {
b128c09f
BB
4671 /*
4672 * Determine whether zio should be reexecuted. This will
4673 * propagate all the way to the root via zio_notify_parent().
4674 */
c776b317 4675 ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
428870ff 4676 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
b128c09f 4677
428870ff
BB
4678 if (IO_IS_ALLOCATING(zio) &&
4679 !(zio->io_flags & ZIO_FLAG_CANFAIL)) {
b128c09f
BB
4680 if (zio->io_error != ENOSPC)
4681 zio->io_reexecute |= ZIO_REEXECUTE_NOW;
4682 else
4683 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
428870ff 4684 }
b128c09f
BB
4685
4686 if ((zio->io_type == ZIO_TYPE_READ ||
4687 zio->io_type == ZIO_TYPE_FREE) &&
572e2857 4688 !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
b128c09f 4689 zio->io_error == ENXIO &&
c776b317
BB
4690 spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
4691 spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
b128c09f
BB
4692 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
4693
4694 if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
4695 zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
428870ff
BB
4696
4697 /*
 4698	 * This might be a good place to attempt either
 4699	 * combinatorial reconstruction or error correction
 4700	 * based on checksums. It might also be a good place
 4701	 * to send out preliminary ereports before we suspend
 4702	 * processing.
4703 */
34dc7c2f
BB
4704 }
4705
4706 /*
b128c09f
BB
4707 * If there were logical child errors, they apply to us now.
4708 * We defer this until now to avoid conflating logical child
4709 * errors with errors that happened to the zio itself when
4710 * updating vdev stats and reporting FMA events above.
34dc7c2f 4711 */
b128c09f 4712 zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
34dc7c2f 4713
428870ff
BB
4714 if ((zio->io_error || zio->io_reexecute) &&
4715 IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
03c6040b 4716 !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
c776b317 4717 zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
9babb374
BB
4718
4719 zio_gang_tree_free(&zio->io_gang_tree);
4720
4721 /*
4722 * Godfather I/Os should never suspend.
4723 */
4724 if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
4725 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
a32494d2 4726 zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
9babb374 4727
b128c09f
BB
4728 if (zio->io_reexecute) {
4729 /*
4730 * This is a logical I/O that wants to reexecute.
4731 *
4732 * Reexecute is top-down. When an i/o fails, if it's not
4733 * the root, it simply notifies its parent and sticks around.
4734 * The parent, seeing that it still has children in zio_done(),
4735 * does the same. This percolates all the way up to the root.
4736 * The root i/o will reexecute or suspend the entire tree.
4737 *
4738 * This approach ensures that zio_reexecute() honors
4739 * all the original i/o dependency relationships, e.g.
4740 * parents not executing until children are ready.
4741 */
4742 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
34dc7c2f 4743
9babb374 4744 zio->io_gang_leader = NULL;
b128c09f 4745
d164b209
BB
4746 mutex_enter(&zio->io_lock);
4747 zio->io_state[ZIO_WAIT_DONE] = 1;
4748 mutex_exit(&zio->io_lock);
4749
9babb374
BB
4750 /*
4751 * "The Godfather" I/O monitors its children but is
 4752	 * not a true parent to them. It tracks them through
 4753	 * the pipeline but severs its ties whenever they get into
4754 * trouble (e.g. suspended). This allows "The Godfather"
4755 * I/O to return status without blocking.
4756 */
3dfb57a3
DB
4757 zl = NULL;
4758 for (pio = zio_walk_parents(zio, &zl); pio != NULL;
4759 pio = pio_next) {
4760 zio_link_t *remove_zl = zl;
4761 pio_next = zio_walk_parents(zio, &zl);
9babb374
BB
4762
4763 if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
4764 (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
3dfb57a3 4765 zio_remove_child(pio, zio, remove_zl);
62840030
MA
4766 /*
4767 * This is a rare code path, so we don't
4768 * bother with "next_to_execute".
4769 */
4770 zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
4771 NULL);
9babb374
BB
4772 }
4773 }
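		/*
		 * Note the iterator pattern above: remove_zl is captured
		 * and pio_next fetched before the child link is removed,
		 * because zio_remove_child() frees the link the walker is
		 * currently positioned on.
		 */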
4774
d164b209 4775 if ((pio = zio_unique_parent(zio)) != NULL) {
b128c09f
BB
4776 /*
4777 * We're not a root i/o, so there's nothing to do
4778 * but notify our parent. Don't propagate errors
4779 * upward since we haven't permanently failed yet.
4780 */
9babb374 4781 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
b128c09f 4782 zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
62840030
MA
4783 /*
4784 * This is a rare code path, so we don't bother with
4785 * "next_to_execute".
4786 */
4787 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
b128c09f
BB
4788 } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
4789 /*
4790 * We'd fail again if we reexecuted now, so suspend
4791 * until conditions improve (e.g. device comes online).
4792 */
cec3a0a1 4793 zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
b128c09f
BB
4794 } else {
4795 /*
4796 * Reexecution is potentially a huge amount of work.
4797 * Hand it off to the otherwise-unused claim taskq.
4798 */
a38718a6 4799 ASSERT(taskq_empty_ent(&zio->io_tqent));
7ef5e54e
AL
4800 spa_taskq_dispatch_ent(zio->io_spa,
4801 ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
23c13c7e 4802 zio_reexecute, zio, 0, &zio->io_tqent);
b128c09f 4803 }
62840030 4804 return (NULL);
34dc7c2f
BB
4805 }
4806
428870ff 4807 ASSERT(zio->io_child_count == 0);
b128c09f
BB
4808 ASSERT(zio->io_reexecute == 0);
4809 ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
34dc7c2f 4810
428870ff
BB
4811 /*
4812 * Report any checksum errors, since the I/O is complete.
4813 */
4814 while (zio->io_cksum_report != NULL) {
4815 zio_cksum_report_t *zcr = zio->io_cksum_report;
4816 zio->io_cksum_report = zcr->zcr_next;
4817 zcr->zcr_next = NULL;
4818 zcr->zcr_finish(zcr, NULL);
4819 zfs_ereport_free_checksum(zcr);
4820 }
4821
920dd524 4822 if (zio->io_flags & ZIO_FLAG_FASTWRITE && zio->io_bp &&
9b67f605
MA
4823 !BP_IS_HOLE(zio->io_bp) && !BP_IS_EMBEDDED(zio->io_bp) &&
4824 !(zio->io_flags & ZIO_FLAG_NOPWRITE)) {
920dd524
ED
4825 metaslab_fastwrite_unmark(zio->io_spa, zio->io_bp);
4826 }
4827
d164b209
BB
4828 /*
4829 * It is the responsibility of the done callback to ensure that this
4830 * particular zio is no longer discoverable for adoption, and as
4831 * such, cannot acquire any new parents.
4832 */
b128c09f
BB
4833 if (zio->io_done)
4834 zio->io_done(zio);
34dc7c2f 4835
d164b209
BB
4836 mutex_enter(&zio->io_lock);
4837 zio->io_state[ZIO_WAIT_DONE] = 1;
4838 mutex_exit(&zio->io_lock);
34dc7c2f 4839
62840030
MA
4840 /*
4841 * We are done executing this zio. We may want to execute a parent
4842 * next. See the comment in zio_notify_parent().
4843 */
4844 zio_t *next_to_execute = NULL;
3dfb57a3
DB
4845 zl = NULL;
4846 for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
4847 zio_link_t *remove_zl = zl;
4848 pio_next = zio_walk_parents(zio, &zl);
4849 zio_remove_child(pio, zio, remove_zl);
62840030 4850 zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
b128c09f 4851 }
34dc7c2f 4852
b128c09f
BB
4853 if (zio->io_waiter != NULL) {
4854 mutex_enter(&zio->io_lock);
4855 zio->io_executor = NULL;
4856 cv_broadcast(&zio->io_cv);
4857 mutex_exit(&zio->io_lock);
4858 } else {
4859 zio_destroy(zio);
4860 }
34dc7c2f 4861
62840030 4862 return (next_to_execute);
34dc7c2f
BB
4863}
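/*
 * The reexecute logic in zio_done() above percolates failure to the root
 * before anything is retried. As a minimal illustration of that shape (a
 * hedged userspace sketch, not ZFS code; the toy_* names are invented), a
 * failed leaf merely notifies upward, and only the root acts:
 */
#include <stdio.h>

typedef struct toy_zio {
	struct toy_zio *parent;		/* NULL at the root */
	int reexecute;			/* set when failure percolates here */
} toy_zio_t;

static void
toy_notify_parent(toy_zio_t *zio)
{
	zio->reexecute = 1;
	if (zio->parent != NULL) {
		/* Not the root: just notify upward and stick around. */
		toy_notify_parent(zio->parent);
	} else {
		/* The root reexecutes or suspends the entire tree. */
		printf("root: reexecute or suspend the whole tree\n");
	}
}

int
main(void)
{
	toy_zio_t root = { NULL, 0 };
	toy_zio_t mid = { &root, 0 };
	toy_zio_t leaf = { &mid, 0 };

	toy_notify_parent(&leaf);	/* the leaf I/O failed */
	return (0);
}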
4864
4865/*
b128c09f
BB
4866 * ==========================================================================
4867 * I/O pipeline definition
4868 * ==========================================================================
34dc7c2f 4869 */
428870ff 4870static zio_pipe_stage_t *zio_pipeline[] = {
b128c09f 4871 NULL,
b128c09f 4872 zio_read_bp_init,
3dfb57a3 4873 zio_write_bp_init,
428870ff
BB
4874 zio_free_bp_init,
4875 zio_issue_async,
3dfb57a3 4876 zio_write_compress,
b5256303 4877 zio_encrypt,
b128c09f 4878 zio_checksum_generate,
03c6040b 4879 zio_nop_write,
428870ff
BB
4880 zio_ddt_read_start,
4881 zio_ddt_read_done,
4882 zio_ddt_write,
4883 zio_ddt_free,
b128c09f
BB
4884 zio_gang_assemble,
4885 zio_gang_issue,
3dfb57a3 4886 zio_dva_throttle,
b128c09f
BB
4887 zio_dva_allocate,
4888 zio_dva_free,
4889 zio_dva_claim,
4890 zio_ready,
4891 zio_vdev_io_start,
4892 zio_vdev_io_done,
4893 zio_vdev_io_assess,
4894 zio_checksum_verify,
4895 zio_done
4896};
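/*
 * The table above is the complete set of pipeline stages; each zio carries
 * a bitmask selecting which stages apply to it, and a stage that returns
 * NULL has taken the I/O asynchronous. As a hedged sketch of the general
 * stage-table pattern only (the toy_* names are invented; this is not the
 * actual zio_execute() machinery):
 */
#include <stddef.h>
#include <stdio.h>

typedef struct toy_io toy_io_t;
/* A stage returns the I/O to keep the pipeline running, or NULL to stop. */
typedef toy_io_t *(*toy_stage_t)(toy_io_t *);

struct toy_io {
	int ti_stage;			/* index of the next stage to run */
	const toy_stage_t *ti_table;	/* NULL-terminated stage table */
};

static toy_io_t *
toy_checksum_generate(toy_io_t *io)
{
	printf("checksum stage\n");
	return (io);			/* keep executing in this context */
}

static toy_io_t *
toy_vdev_io_start(toy_io_t *io)
{
	printf("issue stage\n");
	return (io);
}

static void
toy_execute(toy_io_t *io)
{
	while (io->ti_table[io->ti_stage] != NULL) {
		toy_stage_t stage = io->ti_table[io->ti_stage++];
		if (stage(io) == NULL)
			return;		/* the stage re-dispatched the I/O */
	}
}

int
main(void)
{
	static const toy_stage_t table[] = {
		toy_checksum_generate, toy_vdev_io_start, NULL
	};
	toy_io_t io = { 0, table };

	toy_execute(&io);
	return (0);
}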
c28b2279 4897
9ae529ec 4898
9ae529ec 4899
9ae529ec 4900
fcff0f35
PD
4901/*
4902 * Compare two zbookmark_phys_t's to see which we would reach first in a
4903 * pre-order traversal of the object tree.
4904 *
4905 * This is simple in every case aside from the meta-dnode object. For all other
4906 * objects, we traverse them in order (object 1 before object 2, and so on).
4907 * However, all of these objects are traversed while traversing object 0, since
4908 * the data it points to is the list of objects. Thus, we need to convert to a
4909 * canonical representation so we can compare meta-dnode bookmarks to
4910 * non-meta-dnode bookmarks.
4911 *
4912 * We do this by calculating "equivalents" for each field of the zbookmark.
4913 * zbookmarks outside of the meta-dnode use their own object and level, and
 4914 * calculate the level 0 equivalent (the first L0 blkid that is contained in the
 4915 * block this bookmark refers to) by multiplying their blkid by their span
4916 * (the number of L0 blocks contained within one block at their level).
4917 * zbookmarks inside the meta-dnode calculate their object equivalent
4918 * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
 4919 * level + 1<<31 (a value larger than any level could ever be) for their level.
4920 * This causes them to always compare before a bookmark in their object
4921 * equivalent, compare appropriately to bookmarks in other objects, and to
4922 * compare appropriately to other bookmarks in the meta-dnode.
4923 */
4924int
4925zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
4926 const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
4927{
4928 /*
4929 * These variables represent the "equivalent" values for the zbookmark,
4930 * after converting zbookmarks inside the meta dnode to their
4931 * normal-object equivalents.
4932 */
4933 uint64_t zb1obj, zb2obj;
4934 uint64_t zb1L0, zb2L0;
4935 uint64_t zb1level, zb2level;
4936
4937 if (zb1->zb_object == zb2->zb_object &&
4938 zb1->zb_level == zb2->zb_level &&
4939 zb1->zb_blkid == zb2->zb_blkid)
4940 return (0);
9ae529ec 4941
30af21b0
PD
4942 IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
4943 IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
4944
fcff0f35
PD
4945 /*
4946 * BP_SPANB calculates the span in blocks.
4947 */
4948 zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
4949 zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
9ae529ec
CS
4950
4951 if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
fcff0f35
PD
4952 zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
4953 zb1L0 = 0;
4954 zb1level = zb1->zb_level + COMPARE_META_LEVEL;
4955 } else {
4956 zb1obj = zb1->zb_object;
4957 zb1level = zb1->zb_level;
9ae529ec
CS
4958 }
4959
fcff0f35
PD
4960 if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
4961 zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
4962 zb2L0 = 0;
4963 zb2level = zb2->zb_level + COMPARE_META_LEVEL;
4964 } else {
4965 zb2obj = zb2->zb_object;
4966 zb2level = zb2->zb_level;
4967 }
4968
4969 /* Now that we have a canonical representation, do the comparison. */
4970 if (zb1obj != zb2obj)
4971 return (zb1obj < zb2obj ? -1 : 1);
4972 else if (zb1L0 != zb2L0)
4973 return (zb1L0 < zb2L0 ? -1 : 1);
4974 else if (zb1level != zb2level)
4975 return (zb1level > zb2level ? -1 : 1);
4976 /*
4977 * This can (theoretically) happen if the bookmarks have the same object
4978 * and level, but different blkids, if the block sizes are not the same.
 4979 * There is presently no way to change the indirect block sizes.
4980 */
4981 return (0);
4982}
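/*
 * Worked example of the canonicalization above, under the standard
 * definitions (SPA_MINBLOCKSHIFT is 9, and BP_SPANB(ibs, level) computes
 * 1ULL << ((ibs - SPA_MINBLOCKSHIFT) * level)): with 128K indirect blocks
 * (ibs = 17), a level-1 bookmark spans BP_SPANB(17, 1) = 1 << 8 = 256 L0
 * blocks, so level 1 / blkid 3 has the L0 equivalent 3 * 256 = 768. A
 * level-0 bookmark at blkid 768 in the same object has the same L0
 * equivalent, so the comparison falls through to the level check, where
 * the higher level sorts first; this matches pre-order traversal, which
 * visits an indirect block before the L0 blocks beneath it.
 */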
4983
4984/*
 4985 * This function answers the following question: given that last_block is
 4986 * the place where our traversal stopped last time, does that guarantee that
 4987 * we've visited every node under subtree_root? To answer it, we can't just
 4988 * use the raw output of zbookmark_compare. Instead, we pass in a modified version of
4989 * subtree_root; by incrementing the block id, and then checking whether
4990 * last_block is before or equal to that, we can tell whether or not having
4991 * visited last_block implies that all of subtree_root's children have been
4992 * visited.
4993 */
4994boolean_t
4995zbookmark_subtree_completed(const dnode_phys_t *dnp,
4996 const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
4997{
4998 zbookmark_phys_t mod_zb = *subtree_root;
4999 mod_zb.zb_blkid++;
5000 ASSERT(last_block->zb_level == 0);
5001
5002 /* The objset_phys_t isn't before anything. */
5003 if (dnp == NULL)
9ae529ec 5004 return (B_FALSE);
fcff0f35
PD
5005
5006 /*
5007 * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
5008 * data block size in sectors, because that variable is only used if
5009 * the bookmark refers to a block in the meta-dnode. Since we don't
5010 * know without examining it what object it refers to, and there's no
5011 * harm in passing in this value in other cases, we always pass it in.
5012 *
5013 * We pass in 0 for the indirect block size shift because zb2 must be
5014 * level 0. The indirect block size is only used to calculate the span
5015 * of the bookmark, but since the bookmark must be level 0, the span is
5016 * always 1, so the math works out.
5017 *
5018 * If you make changes to how the zbookmark_compare code works, be sure
5019 * to make sure that this code still works afterwards.
5020 */
5021 return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
5022 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
5023 last_block) <= 0);
9ae529ec
CS
5024}
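/*
 * Hedged usage sketch (the bookmark values and the helper name are
 * invented for illustration): a resumable traversal, such as a scrub,
 * could ask whether everything beneath a level-1 indirect block was
 * already visited before the interruption.
 */
static boolean_t
example_subtree_done(const dnode_phys_t *dnp)
{
	zbookmark_phys_t subtree_root, last_block;

	/* The L1 indirect block at blkid 4 of object 10 in objset 5. */
	SET_BOOKMARK(&subtree_root, 5, 10, 1, 4);

	/* The traversal previously stopped at this level-0 block. */
	SET_BOOKMARK(&last_block, 5, 10, 0, 2048);

	return (zbookmark_subtree_completed(dnp, &subtree_root, &last_block));
}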
5025
c28b2279 5026EXPORT_SYMBOL(zio_type_name);
81971b13
BB
5027EXPORT_SYMBOL(zio_buf_alloc);
5028EXPORT_SYMBOL(zio_data_buf_alloc);
5029EXPORT_SYMBOL(zio_buf_free);
5030EXPORT_SYMBOL(zio_data_buf_free);
c28b2279 5031
03fdcb9a 5032ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
ad796b8a 5033 "Max I/O completion time (milliseconds) before marking it as slow");
c409e464 5034
03fdcb9a
MM
5035ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
5036 "Prioritize requeued I/O");
29dee3ee 5037
03fdcb9a 5038ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, INT, ZMOD_RW,
d1d7e268 5039 "Defer frees starting in this pass");
29dee3ee 5040
03fdcb9a 5041ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, INT, ZMOD_RW,
d1d7e268 5042 "Don't compress starting in this pass");
29dee3ee 5043
03fdcb9a 5044ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, INT, ZMOD_RW,
d1d7e268 5045 "Rewrite new bps starting in this pass");
3dfb57a3 5046
03fdcb9a 5047ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
3dfb57a3 5048 "Throttle block allocations in the ZIO pipeline");
638dd5f4 5049
03fdcb9a 5050ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
638dd5f4 5051 "Log all slow ZIOs, not just those with vdevs");
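/*
 * Example: assuming the standard Linux module-parameter layout, the
 * tunables above appear under /sys/module/zfs/parameters/, so e.g.
 *
 *	echo 10000 > /sys/module/zfs/parameters/zio_slow_io_ms
 *
 * lowers the slow-I/O threshold to 10 seconds at runtime.
 */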