Commit | Line | Data |
---|---|---|
34dc7c2f BB |
1 | /* |
2 | * CDDL HEADER START | |
3 | * | |
4 | * The contents of this file are subject to the terms of the | |
5 | * Common Development and Distribution License (the "License"). | |
6 | * You may not use this file except in compliance with the License. | |
7 | * | |
8 | * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE | |
9 | * or http://www.opensolaris.org/os/licensing. | |
10 | * See the License for the specific language governing permissions | |
11 | * and limitations under the License. | |
12 | * | |
13 | * When distributing Covered Code, include this CDDL HEADER in each | |
14 | * file and include the License file at usr/src/OPENSOLARIS.LICENSE. | |
15 | * If applicable, add the following below this CDDL HEADER, with the | |
16 | * fields enclosed by brackets "[]" replaced with your own identifying | |
17 | * information: Portions Copyright [yyyy] [name of copyright owner] | |
18 | * | |
19 | * CDDL HEADER END | |
20 | */ | |
21 | /* | |
428870ff | 22 | * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. |
4f072827 | 23 | * Copyright (c) 2011, 2020 by Delphix. All rights reserved. |
a38718a6 | 24 | * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved. |
cc99f275 | 25 | * Copyright (c) 2017, Intel Corporation. |
10b3c7f5 MN |
26 | * Copyright (c) 2019, Klara Inc. |
27 | * Copyright (c) 2019, Allan Jude | |
34dc7c2f BB |
28 | */ |
29 | ||
f1512ee6 | 30 | #include <sys/sysmacros.h> |
34dc7c2f BB |
31 | #include <sys/zfs_context.h> |
32 | #include <sys/fm/fs/zfs.h> | |
33 | #include <sys/spa.h> | |
34 | #include <sys/txg.h> | |
35 | #include <sys/spa_impl.h> | |
36 | #include <sys/vdev_impl.h> | |
1b939560 | 37 | #include <sys/vdev_trim.h> |
34dc7c2f BB |
38 | #include <sys/zio_impl.h> |
39 | #include <sys/zio_compress.h> | |
40 | #include <sys/zio_checksum.h> | |
428870ff BB |
41 | #include <sys/dmu_objset.h> |
42 | #include <sys/arc.h> | |
43 | #include <sys/ddt.h> | |
9b67f605 | 44 | #include <sys/blkptr.h> |
b0bc7a84 | 45 | #include <sys/zfeature.h> |
d4a72f23 | 46 | #include <sys/dsl_scan.h> |
3dfb57a3 | 47 | #include <sys/metaslab_impl.h> |
193a37cb | 48 | #include <sys/time.h> |
e5d1c27e | 49 | #include <sys/trace_zfs.h> |
a6255b7f | 50 | #include <sys/abd.h> |
b5256303 | 51 | #include <sys/dsl_crypt.h> |
3f387973 | 52 | #include <cityhash.h> |
34dc7c2f | 53 | |
34dc7c2f BB |
54 | /* |
55 | * ========================================================================== | |
56 | * I/O type descriptions | |
57 | * ========================================================================== | |
58 | */ | |
e8b96c60 | 59 | const char *zio_type_name[ZIO_TYPES] = { |
3dfb57a3 DB |
60 | /* |
61 | * Note: Linux kernel thread name length is limited, | |
62 | * so these names will differ from upstream OpenZFS. | |
63 | */ | |
1b939560 | 64 | "z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl", "z_trim" |
428870ff | 65 | }; |
34dc7c2f | 66 | |
27f2b90d | 67 | int zio_dva_throttle_enabled = B_TRUE; |
638dd5f4 | 68 | int zio_deadman_log_all = B_FALSE; |
3dfb57a3 | 69 | |
34dc7c2f BB |
70 | /* |
71 | * ========================================================================== | |
72 | * I/O kmem caches | |
73 | * ========================================================================== | |
74 | */ | |
75 | kmem_cache_t *zio_cache; | |
d164b209 | 76 | kmem_cache_t *zio_link_cache; |
34dc7c2f BB |
77 | kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; |
78 | kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; | |
a6255b7f DQ |
79 | #if defined(ZFS_DEBUG) && !defined(_KERNEL) |
80 | uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; | |
81 | uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; | |
82 | #endif | |
83 | ||
ad796b8a TH |
84 | /* Mark IOs as "slow" if they take longer than 30 seconds */ |
85 | int zio_slow_io_ms = (30 * MILLISEC); | |
34dc7c2f | 86 | |
fcff0f35 PD |
87 | #define BP_SPANB(indblkshift, level) \ |
88 | (((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT))) | |
89 | #define COMPARE_META_LEVEL 0x80000000ul | |
55d85d5a GW |
90 | /* |
91 | * The following actions directly affect the spa's sync-to-convergence logic. | |
92 | * The values below define the sync pass when we start performing the action. | |
93 | * Care should be taken when changing these values as they directly impact | |
94 | * spa_sync() performance. Tuning these values may introduce subtle performance | |
95 | * pathologies and should only be done in the context of performance analysis. | |
96 | * These tunables will eventually be removed and replaced with #defines once | |
97 | * enough analysis has been done to determine optimal values. | |
98 | * | |
99 | * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that | |
100 | * regular blocks are not deferred. | |
be89734a MA |
101 | * |
102 | * Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable | |
103 | * compression (including metadata). In practice, we don't have this | |
104 | * many sync passes, so this has no effect. | |
105 | * | |
106 | * The original intent was that disabling compression would help the sync | |
107 | * passes to converge. However, in practice disabling compression increases | |
108 | * the average number of sync passes, because when we turn compression off, | |
109 | * the sizes of many blocks change and thus we have to re-allocate (not | |
110 | * overwrite) them. It also increases the number of 128KB allocations (e.g. | |
111 | * for indirect blocks and spacemaps) because these will not be compressed. | |
112 | * The 128K allocations are especially detrimental to performance on highly | |
113 | * fragmented systems, which may have very few free segments of this size, | |
114 | * and may need to load new metaslabs to satisfy 128K allocations. | |
55d85d5a GW |
115 | */ |
116 | int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */ | |
be89734a | 117 | int zfs_sync_pass_dont_compress = 8; /* don't compress starting in this pass */ |
55d85d5a GW |
118 | int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */ |
119 | ||
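The three tunables above are compared against the current sync pass number. As a rough illustration (a minimal sketch, not code from this file; `spa_sync_pass()` and `ZIO_COMPRESS_OFF` are existing names, the helper itself is hypothetical), a pass threshold is typically consulted like this:

```c
/* Hypothetical helper: how a sync-pass threshold gates behavior. */
static enum zio_compress
example_effective_compress(spa_t *spa, enum zio_compress requested)
{
	/*
	 * Once the current sync pass reaches zfs_sync_pass_dont_compress,
	 * stop compressing (see the caveats in the comment above).
	 */
	if (spa_sync_pass(spa) >= zfs_sync_pass_dont_compress)
		return (ZIO_COMPRESS_OFF);
	return (requested);
}
```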
34dc7c2f | 120 | /* |
b128c09f BB |
121 | * An allocating zio is one that either currently has the DVA allocate |
122 | * stage set or will have it later in its lifetime. | |
34dc7c2f | 123 | */ |
428870ff BB |
124 | #define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE) |
125 | ||
3c502d3b MM |
126 | /* |
127 | * Enable smaller cores by excluding metadata | |
128 | * allocations as well. | |
129 | */ | |
130 | int zio_exclude_metadata = 0; | |
c409e464 | 131 | int zio_requeue_io_start_cut_in_line = 1; |
428870ff BB |
132 | |
133 | #ifdef ZFS_DEBUG | |
134 | int zio_buf_debug_limit = 16384; | |
135 | #else | |
136 | int zio_buf_debug_limit = 0; | |
137 | #endif | |
34dc7c2f | 138 | |
da6b4005 NB |
139 | static inline void __zio_execute(zio_t *zio); |
140 | ||
3dfb57a3 DB |
141 | static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t); |
142 | ||
34dc7c2f BB |
143 | void |
144 | zio_init(void) | |
145 | { | |
146 | size_t c; | |
34dc7c2f | 147 | |
3941503c BB |
148 | zio_cache = kmem_cache_create("zio_cache", |
149 | sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0); | |
d164b209 | 150 | zio_link_cache = kmem_cache_create("zio_link_cache", |
6795a698 | 151 | sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0); |
34dc7c2f BB |
152 | |
153 | /* | |
154 | * For small buffers, we want a cache for each multiple of | |
f1512ee6 MA |
155 | * SPA_MINBLOCKSIZE. For larger buffers, we want a cache |
156 | * for each quarter-power of 2. | |
34dc7c2f BB |
157 | */ |
158 | for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { | |
159 | size_t size = (c + 1) << SPA_MINBLOCKSHIFT; | |
160 | size_t p2 = size; | |
161 | size_t align = 0; | |
3c502d3b MM |
162 | size_t data_cflags, cflags; |
163 | ||
164 | data_cflags = KMC_NODEBUG; | |
165 | cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ? | |
166 | KMC_NODEBUG : 0; | |
34dc7c2f | 167 | |
34328f3c | 168 | #if defined(_ILP32) && defined(_KERNEL) |
f1512ee6 MA |
169 | /* |
170 | * Cache size limited to 1M on 32-bit platforms until ARC | |
171 | * buffers no longer require virtual address space. | |
172 | */ | |
173 | if (size > zfs_max_recordsize) | |
174 | break; | |
175 | #endif | |
176 | ||
177 | while (!ISP2(p2)) | |
34dc7c2f BB |
178 | p2 &= p2 - 1; |
179 | ||
498877ba MA |
180 | #ifndef _KERNEL |
181 | /* | |
182 | * If we are using watchpoints, put each buffer on its own page, | |
183 | * to eliminate the performance overhead of trapping to the | |
184 | * kernel when modifying a non-watched buffer that shares the | |
185 | * page with a watched buffer. | |
186 | */ | |
187 | if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE)) | |
188 | continue; | |
fcf64f45 BB |
189 | /* |
190 | * Here's the problem - on 4K native devices in userland on | |
191 | * Linux using O_DIRECT, buffers must be 4K aligned or I/O | |
192 | * will fail with EINVAL, causing zdb (and others) to coredump. | |
193 | * Since userland probably doesn't need optimized buffer caches, | |
194 | * we just force 4K alignment on everything. | |
195 | */ | |
196 | align = 8 * SPA_MINBLOCKSIZE; | |
197 | #else | |
24fa2034 | 198 | if (size < PAGESIZE) { |
34dc7c2f | 199 | align = SPA_MINBLOCKSIZE; |
498877ba | 200 | } else if (IS_P2ALIGNED(size, p2 >> 2)) { |
24fa2034 | 201 | align = PAGESIZE; |
34dc7c2f | 202 | } |
fcf64f45 | 203 | #endif |
34dc7c2f BB |
204 | |
205 | if (align != 0) { | |
206 | char name[36]; | |
c9e319fa JL |
207 | (void) snprintf(name, sizeof (name), "zio_buf_%lu", |
208 | (ulong_t)size); | |
34dc7c2f | 209 | zio_buf_cache[c] = kmem_cache_create(name, size, |
6442f3cf | 210 | align, NULL, NULL, NULL, NULL, NULL, cflags); |
34dc7c2f | 211 | |
c9e319fa JL |
212 | (void) snprintf(name, sizeof (name), "zio_data_buf_%lu", |
213 | (ulong_t)size); | |
34dc7c2f | 214 | zio_data_buf_cache[c] = kmem_cache_create(name, size, |
18ca574f | 215 | align, NULL, NULL, NULL, NULL, NULL, data_cflags); |
34dc7c2f BB |
216 | } |
217 | } | |
218 | ||
219 | while (--c != 0) { | |
220 | ASSERT(zio_buf_cache[c] != NULL); | |
221 | if (zio_buf_cache[c - 1] == NULL) | |
222 | zio_buf_cache[c - 1] = zio_buf_cache[c]; | |
223 | ||
224 | ASSERT(zio_data_buf_cache[c] != NULL); | |
225 | if (zio_data_buf_cache[c - 1] == NULL) | |
226 | zio_data_buf_cache[c - 1] = zio_data_buf_cache[c]; | |
227 | } | |
228 | ||
34dc7c2f | 229 | zio_inject_init(); |
9759c60f ED |
230 | |
231 | lz4_init(); | |
34dc7c2f BB |
232 | } |
233 | ||
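To make the cache-sizing scheme in zio_init() concrete: small sizes get a cache per SPA_MINBLOCKSIZE multiple, larger sizes only per quarter-power-of-2, and the fill loop at the end points every uncovered size class at the next larger cache. A minimal sketch (hypothetical helper; the index computation mirrors zio_buf_alloc() below, and the 13K example assumes the in-kernel sizing rules with a 4K PAGESIZE):

```c
/* Hypothetical helper: the cache index zio_buf_alloc() derives from a size. */
static size_t
example_zio_cache_index(size_t size)
{
	/* One slot per SPA_MINBLOCKSIZE (512-byte) multiple. */
	return ((size - 1) >> SPA_MINBLOCKSHIFT);
}

/*
 * Example: a 13K buffer has no dedicated cache (13K is not a multiple of a
 * quarter of 8K), so its slot was filled with the next larger cache and the
 * request is served from the 14K cache.
 */
```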
234 | void | |
235 | zio_fini(void) | |
236 | { | |
237 | size_t c; | |
238 | kmem_cache_t *last_cache = NULL; | |
239 | kmem_cache_t *last_data_cache = NULL; | |
240 | ||
241 | for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { | |
f1512ee6 MA |
242 | #ifdef _ILP32 |
243 | /* | |
244 | * Cache size limited to 1M on 32-bit platforms until ARC | |
245 | * buffers no longer require virtual address space. | |
246 | */ | |
247 | if (((c + 1) << SPA_MINBLOCKSHIFT) > zfs_max_recordsize) | |
248 | break; | |
a6255b7f DQ |
249 | #endif |
250 | #if defined(ZFS_DEBUG) && !defined(_KERNEL) | |
251 | if (zio_buf_cache_allocs[c] != zio_buf_cache_frees[c]) | |
252 | (void) printf("zio_fini: [%d] %llu != %llu\n", | |
253 | (int)((c + 1) << SPA_MINBLOCKSHIFT), | |
254 | (long long unsigned)zio_buf_cache_allocs[c], | |
255 | (long long unsigned)zio_buf_cache_frees[c]); | |
f1512ee6 | 256 | #endif |
34dc7c2f BB |
257 | if (zio_buf_cache[c] != last_cache) { |
258 | last_cache = zio_buf_cache[c]; | |
259 | kmem_cache_destroy(zio_buf_cache[c]); | |
260 | } | |
261 | zio_buf_cache[c] = NULL; | |
262 | ||
263 | if (zio_data_buf_cache[c] != last_data_cache) { | |
264 | last_data_cache = zio_data_buf_cache[c]; | |
265 | kmem_cache_destroy(zio_data_buf_cache[c]); | |
266 | } | |
267 | zio_data_buf_cache[c] = NULL; | |
268 | } | |
269 | ||
d164b209 | 270 | kmem_cache_destroy(zio_link_cache); |
34dc7c2f BB |
271 | kmem_cache_destroy(zio_cache); |
272 | ||
273 | zio_inject_fini(); | |
9759c60f ED |
274 | |
275 | lz4_fini(); | |
34dc7c2f BB |
276 | } |
277 | ||
278 | /* | |
279 | * ========================================================================== | |
280 | * Allocate and free I/O buffers | |
281 | * ========================================================================== | |
282 | */ | |
283 | ||
284 | /* | |
285 | * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a | |
286 | * crashdump if the kernel panics, so use it judiciously. Obviously, it's | |
287 | * useful to inspect ZFS metadata, but if possible, we should avoid keeping | |
288 | * excess / transient data in-core during a crashdump. | |
289 | */ | |
290 | void * | |
291 | zio_buf_alloc(size_t size) | |
292 | { | |
293 | size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; | |
294 | ||
63e3a861 | 295 | VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); |
a6255b7f DQ |
296 | #if defined(ZFS_DEBUG) && !defined(_KERNEL) |
297 | atomic_add_64(&zio_buf_cache_allocs[c], 1); | |
298 | #endif | |
34dc7c2f | 299 | |
efcd79a8 | 300 | return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE)); |
34dc7c2f BB |
301 | } |
302 | ||
303 | /* | |
304 | * Use zio_data_buf_alloc to allocate data. The data will not appear in a | |
305 | * crashdump if the kernel panics. This exists so that we will limit the amount | |
306 | * of ZFS data that shows up in a kernel crashdump, thus reducing the amount | |
307 | * of kernel heap dumped to disk when the kernel panics. | |
308 | */ | |
309 | void * | |
310 | zio_data_buf_alloc(size_t size) | |
311 | { | |
312 | size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; | |
313 | ||
63e3a861 | 314 | VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); |
34dc7c2f | 315 | |
efcd79a8 | 316 | return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE)); |
34dc7c2f BB |
317 | } |
318 | ||
319 | void | |
320 | zio_buf_free(void *buf, size_t size) | |
321 | { | |
322 | size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; | |
323 | ||
63e3a861 | 324 | VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); |
a6255b7f DQ |
325 | #if defined(ZFS_DEBUG) && !defined(_KERNEL) |
326 | atomic_add_64(&zio_buf_cache_frees[c], 1); | |
327 | #endif | |
34dc7c2f BB |
328 | |
329 | kmem_cache_free(zio_buf_cache[c], buf); | |
330 | } | |
331 | ||
332 | void | |
333 | zio_data_buf_free(void *buf, size_t size) | |
334 | { | |
335 | size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; | |
336 | ||
63e3a861 | 337 | VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); |
34dc7c2f BB |
338 | |
339 | kmem_cache_free(zio_data_buf_cache[c], buf); | |
340 | } | |
341 | ||
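A brief usage sketch of the two allocator families described above (hypothetical helper; the only firm rules are to pick the metadata or data variant deliberately and to free through the matching function with the original size):

```c
/* Hypothetical helper illustrating paired allocation and free. */
static void
example_buffer_usage(size_t size)
{
	void *meta = zio_buf_alloc(size);	/* may appear in a crash dump */
	void *data = zio_data_buf_alloc(size);	/* excluded from a crash dump */

	/* ... fill and consume the buffers ... */

	zio_data_buf_free(data, size);		/* free with the original size */
	zio_buf_free(meta, size);
}
```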
84c07ada GN |
342 | static void |
343 | zio_abd_free(void *abd, size_t size) | |
344 | { | |
345 | abd_free((abd_t *)abd); | |
346 | } | |
347 | ||
34dc7c2f BB |
348 | /* |
349 | * ========================================================================== | |
350 | * Push and pop I/O transform buffers | |
351 | * ========================================================================== | |
352 | */ | |
d3c2ae1c | 353 | void |
a6255b7f | 354 | zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize, |
e9aa730c | 355 | zio_transform_func_t *transform) |
34dc7c2f | 356 | { |
79c76d5b | 357 | zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP); |
34dc7c2f | 358 | |
a6255b7f | 359 | zt->zt_orig_abd = zio->io_abd; |
b128c09f | 360 | zt->zt_orig_size = zio->io_size; |
34dc7c2f | 361 | zt->zt_bufsize = bufsize; |
b128c09f | 362 | zt->zt_transform = transform; |
34dc7c2f BB |
363 | |
364 | zt->zt_next = zio->io_transform_stack; | |
365 | zio->io_transform_stack = zt; | |
366 | ||
a6255b7f | 367 | zio->io_abd = data; |
34dc7c2f BB |
368 | zio->io_size = size; |
369 | } | |
370 | ||
d3c2ae1c | 371 | void |
b128c09f | 372 | zio_pop_transforms(zio_t *zio) |
34dc7c2f | 373 | { |
b128c09f BB |
374 | zio_transform_t *zt; |
375 | ||
376 | while ((zt = zio->io_transform_stack) != NULL) { | |
377 | if (zt->zt_transform != NULL) | |
378 | zt->zt_transform(zio, | |
a6255b7f | 379 | zt->zt_orig_abd, zt->zt_orig_size); |
34dc7c2f | 380 | |
428870ff | 381 | if (zt->zt_bufsize != 0) |
a6255b7f | 382 | abd_free(zio->io_abd); |
34dc7c2f | 383 | |
a6255b7f | 384 | zio->io_abd = zt->zt_orig_abd; |
b128c09f BB |
385 | zio->io_size = zt->zt_orig_size; |
386 | zio->io_transform_stack = zt->zt_next; | |
34dc7c2f | 387 | |
b128c09f | 388 | kmem_free(zt, sizeof (zio_transform_t)); |
34dc7c2f BB |
389 | } |
390 | } | |
391 | ||
b128c09f BB |
392 | /* |
393 | * ========================================================================== | |
b5256303 | 394 | * I/O transform callbacks for subblocks, decompression, and decryption |
b128c09f BB |
395 | * ========================================================================== |
396 | */ | |
397 | static void | |
a6255b7f | 398 | zio_subblock(zio_t *zio, abd_t *data, uint64_t size) |
b128c09f BB |
399 | { |
400 | ASSERT(zio->io_size > size); | |
401 | ||
402 | if (zio->io_type == ZIO_TYPE_READ) | |
a6255b7f | 403 | abd_copy(data, zio->io_abd, size); |
b128c09f BB |
404 | } |
405 | ||
406 | static void | |
a6255b7f | 407 | zio_decompress(zio_t *zio, abd_t *data, uint64_t size) |
b128c09f | 408 | { |
a6255b7f DQ |
409 | if (zio->io_error == 0) { |
410 | void *tmp = abd_borrow_buf(data, size); | |
411 | int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp), | |
10b3c7f5 MN |
412 | zio->io_abd, tmp, zio->io_size, size, |
413 | &zio->io_prop.zp_complevel); | |
a6255b7f DQ |
414 | abd_return_buf_copy(data, tmp, size); |
415 | ||
c3bd3fb4 TC |
416 | if (zio_injection_enabled && ret == 0) |
417 | ret = zio_handle_fault_injection(zio, EINVAL); | |
418 | ||
a6255b7f DQ |
419 | if (ret != 0) |
420 | zio->io_error = SET_ERROR(EIO); | |
421 | } | |
b128c09f BB |
422 | } |
423 | ||
b5256303 TC |
424 | static void |
425 | zio_decrypt(zio_t *zio, abd_t *data, uint64_t size) | |
426 | { | |
427 | int ret; | |
428 | void *tmp; | |
429 | blkptr_t *bp = zio->io_bp; | |
ae76f45c TC |
430 | spa_t *spa = zio->io_spa; |
431 | uint64_t dsobj = zio->io_bookmark.zb_objset; | |
b5256303 TC |
432 | uint64_t lsize = BP_GET_LSIZE(bp); |
433 | dmu_object_type_t ot = BP_GET_TYPE(bp); | |
434 | uint8_t salt[ZIO_DATA_SALT_LEN]; | |
435 | uint8_t iv[ZIO_DATA_IV_LEN]; | |
436 | uint8_t mac[ZIO_DATA_MAC_LEN]; | |
437 | boolean_t no_crypt = B_FALSE; | |
438 | ||
439 | ASSERT(BP_USES_CRYPT(bp)); | |
440 | ASSERT3U(size, !=, 0); | |
441 | ||
442 | if (zio->io_error != 0) | |
443 | return; | |
444 | ||
445 | /* | |
446 | * Verify the cksum of MACs stored in an indirect bp. It will always | |
447 | * be possible to verify this since it does not require an encryption | |
448 | * key. | |
449 | */ | |
450 | if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) { | |
451 | zio_crypt_decode_mac_bp(bp, mac); | |
452 | ||
453 | if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) { | |
454 | /* | |
455 | * We haven't decompressed the data yet, but | |
456 | * zio_crypt_do_indirect_mac_checksum() requires | |
457 | * decompressed data to be able to parse out the MACs | |
458 | * from the indirect block. We decompress it now and | |
459 | * throw away the result after we are finished. | |
460 | */ | |
461 | tmp = zio_buf_alloc(lsize); | |
462 | ret = zio_decompress_data(BP_GET_COMPRESS(bp), | |
10b3c7f5 MN |
463 | zio->io_abd, tmp, zio->io_size, lsize, |
464 | &zio->io_prop.zp_complevel); | |
b5256303 TC |
465 | if (ret != 0) { |
466 | ret = SET_ERROR(EIO); | |
467 | goto error; | |
468 | } | |
469 | ret = zio_crypt_do_indirect_mac_checksum(B_FALSE, | |
470 | tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac); | |
471 | zio_buf_free(tmp, lsize); | |
472 | } else { | |
473 | ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE, | |
474 | zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac); | |
475 | } | |
476 | abd_copy(data, zio->io_abd, size); | |
477 | ||
be9a5c35 TC |
478 | if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) { |
479 | ret = zio_handle_decrypt_injection(spa, | |
480 | &zio->io_bookmark, ot, ECKSUM); | |
481 | } | |
b5256303 TC |
482 | if (ret != 0) |
483 | goto error; | |
484 | ||
485 | return; | |
486 | } | |
487 | ||
488 | /* | |
489 | * If this is an authenticated block, just check the MAC. It would be | |
490 | * nice to separate this out into its own flag, but for the moment | |
491 | * enum zio_flag is out of bits. | |
492 | */ | |
493 | if (BP_IS_AUTHENTICATED(bp)) { | |
494 | if (ot == DMU_OT_OBJSET) { | |
ae76f45c TC |
495 | ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, |
496 | dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp)); | |
b5256303 TC |
497 | } else { |
498 | zio_crypt_decode_mac_bp(bp, mac); | |
ae76f45c TC |
499 | ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, |
500 | zio->io_abd, size, mac); | |
be9a5c35 TC |
501 | if (zio_injection_enabled && ret == 0) { |
502 | ret = zio_handle_decrypt_injection(spa, | |
503 | &zio->io_bookmark, ot, ECKSUM); | |
504 | } | |
b5256303 TC |
505 | } |
506 | abd_copy(data, zio->io_abd, size); | |
507 | ||
508 | if (ret != 0) | |
509 | goto error; | |
510 | ||
511 | return; | |
512 | } | |
513 | ||
514 | zio_crypt_decode_params_bp(bp, salt, iv); | |
515 | ||
516 | if (ot == DMU_OT_INTENT_LOG) { | |
517 | tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t)); | |
518 | zio_crypt_decode_mac_zil(tmp, mac); | |
519 | abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t)); | |
520 | } else { | |
521 | zio_crypt_decode_mac_bp(bp, mac); | |
522 | } | |
523 | ||
be9a5c35 TC |
524 | ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp), |
525 | BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data, | |
526 | zio->io_abd, &no_crypt); | |
b5256303 TC |
527 | if (no_crypt) |
528 | abd_copy(data, zio->io_abd, size); | |
529 | ||
530 | if (ret != 0) | |
531 | goto error; | |
532 | ||
533 | return; | |
534 | ||
535 | error: | |
536 | /* assert that the key was found unless this was speculative */ | |
be9a5c35 | 537 | ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE)); |
b5256303 TC |
538 | |
539 | /* | |
540 | * If there was a decryption / authentication error, return EIO as | |
541 | * the io_error. If this was not a speculative zio, create an ereport. | |
542 | */ | |
543 | if (ret == ECKSUM) { | |
a2c2ed1b | 544 | zio->io_error = SET_ERROR(EIO); |
b5256303 | 545 | if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) { |
be9a5c35 | 546 | spa_log_error(spa, &zio->io_bookmark); |
1144586b | 547 | (void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION, |
4f072827 | 548 | spa, NULL, &zio->io_bookmark, zio, 0); |
b5256303 TC |
549 | } |
550 | } else { | |
551 | zio->io_error = ret; | |
552 | } | |
553 | } | |
554 | ||
b128c09f BB |
555 | /* |
556 | * ========================================================================== | |
557 | * I/O parent/child relationships and pipeline interlocks | |
558 | * ========================================================================== | |
559 | */ | |
d164b209 | 560 | zio_t * |
3dfb57a3 | 561 | zio_walk_parents(zio_t *cio, zio_link_t **zl) |
d164b209 | 562 | { |
d164b209 | 563 | list_t *pl = &cio->io_parent_list; |
b128c09f | 564 | |
3dfb57a3 DB |
565 | *zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl); |
566 | if (*zl == NULL) | |
d164b209 BB |
567 | return (NULL); |
568 | ||
3dfb57a3 DB |
569 | ASSERT((*zl)->zl_child == cio); |
570 | return ((*zl)->zl_parent); | |
d164b209 BB |
571 | } |
572 | ||
573 | zio_t * | |
3dfb57a3 | 574 | zio_walk_children(zio_t *pio, zio_link_t **zl) |
d164b209 | 575 | { |
d164b209 BB |
576 | list_t *cl = &pio->io_child_list; |
577 | ||
a8b2e306 TC |
578 | ASSERT(MUTEX_HELD(&pio->io_lock)); |
579 | ||
3dfb57a3 DB |
580 | *zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl); |
581 | if (*zl == NULL) | |
d164b209 BB |
582 | return (NULL); |
583 | ||
3dfb57a3 DB |
584 | ASSERT((*zl)->zl_parent == pio); |
585 | return ((*zl)->zl_child); | |
d164b209 BB |
586 | } |
587 | ||
588 | zio_t * | |
589 | zio_unique_parent(zio_t *cio) | |
590 | { | |
3dfb57a3 DB |
591 | zio_link_t *zl = NULL; |
592 | zio_t *pio = zio_walk_parents(cio, &zl); | |
d164b209 | 593 | |
3dfb57a3 | 594 | VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL); |
d164b209 BB |
595 | return (pio); |
596 | } | |
597 | ||
598 | void | |
599 | zio_add_child(zio_t *pio, zio_t *cio) | |
b128c09f | 600 | { |
79c76d5b | 601 | zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP); |
d164b209 BB |
602 | |
603 | /* | |
604 | * Logical I/Os can have logical, gang, or vdev children. | |
605 | * Gang I/Os can have gang or vdev children. | |
606 | * Vdev I/Os can only have vdev children. | |
607 | * The following ASSERT captures all of these constraints. | |
608 | */ | |
1ce23dca | 609 | ASSERT3S(cio->io_child_type, <=, pio->io_child_type); |
d164b209 BB |
610 | |
611 | zl->zl_parent = pio; | |
612 | zl->zl_child = cio; | |
613 | ||
b128c09f | 614 | mutex_enter(&pio->io_lock); |
a8b2e306 | 615 | mutex_enter(&cio->io_lock); |
d164b209 BB |
616 | |
617 | ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0); | |
618 | ||
1c27024e | 619 | for (int w = 0; w < ZIO_WAIT_TYPES; w++) |
d164b209 BB |
620 | pio->io_children[cio->io_child_type][w] += !cio->io_state[w]; |
621 | ||
622 | list_insert_head(&pio->io_child_list, zl); | |
623 | list_insert_head(&cio->io_parent_list, zl); | |
624 | ||
428870ff BB |
625 | pio->io_child_count++; |
626 | cio->io_parent_count++; | |
627 | ||
d164b209 | 628 | mutex_exit(&cio->io_lock); |
a8b2e306 | 629 | mutex_exit(&pio->io_lock); |
b128c09f BB |
630 | } |
631 | ||
34dc7c2f | 632 | static void |
d164b209 | 633 | zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl) |
b128c09f | 634 | { |
d164b209 BB |
635 | ASSERT(zl->zl_parent == pio); |
636 | ASSERT(zl->zl_child == cio); | |
b128c09f BB |
637 | |
638 | mutex_enter(&pio->io_lock); | |
a8b2e306 | 639 | mutex_enter(&cio->io_lock); |
d164b209 BB |
640 | |
641 | list_remove(&pio->io_child_list, zl); | |
642 | list_remove(&cio->io_parent_list, zl); | |
643 | ||
428870ff BB |
644 | pio->io_child_count--; |
645 | cio->io_parent_count--; | |
646 | ||
d164b209 | 647 | mutex_exit(&cio->io_lock); |
a8b2e306 | 648 | mutex_exit(&pio->io_lock); |
d164b209 | 649 | kmem_cache_free(zio_link_cache, zl); |
b128c09f BB |
650 | } |
651 | ||
652 | static boolean_t | |
ddc751d5 | 653 | zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait) |
34dc7c2f | 654 | { |
b128c09f BB |
655 | boolean_t waiting = B_FALSE; |
656 | ||
657 | mutex_enter(&zio->io_lock); | |
658 | ASSERT(zio->io_stall == NULL); | |
ddc751d5 GW |
659 | for (int c = 0; c < ZIO_CHILD_TYPES; c++) { |
660 | if (!(ZIO_CHILD_BIT_IS_SET(childbits, c))) | |
661 | continue; | |
662 | ||
663 | uint64_t *countp = &zio->io_children[c][wait]; | |
664 | if (*countp != 0) { | |
665 | zio->io_stage >>= 1; | |
666 | ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN); | |
667 | zio->io_stall = countp; | |
668 | waiting = B_TRUE; | |
669 | break; | |
670 | } | |
b128c09f BB |
671 | } |
672 | mutex_exit(&zio->io_lock); | |
b128c09f BB |
673 | return (waiting); |
674 | } | |
34dc7c2f | 675 | |
bf701a83 BB |
676 | __attribute__((always_inline)) |
677 | static inline void | |
62840030 MA |
678 | zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait, |
679 | zio_t **next_to_executep) | |
b128c09f BB |
680 | { |
681 | uint64_t *countp = &pio->io_children[zio->io_child_type][wait]; | |
682 | int *errorp = &pio->io_child_error[zio->io_child_type]; | |
34dc7c2f | 683 | |
b128c09f BB |
684 | mutex_enter(&pio->io_lock); |
685 | if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) | |
686 | *errorp = zio_worst_error(*errorp, zio->io_error); | |
687 | pio->io_reexecute |= zio->io_reexecute; | |
688 | ASSERT3U(*countp, >, 0); | |
e8b96c60 MA |
689 | |
690 | (*countp)--; | |
691 | ||
692 | if (*countp == 0 && pio->io_stall == countp) { | |
3dfb57a3 DB |
693 | zio_taskq_type_t type = |
694 | pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE : | |
695 | ZIO_TASKQ_INTERRUPT; | |
b128c09f BB |
696 | pio->io_stall = NULL; |
697 | mutex_exit(&pio->io_lock); | |
62840030 | 698 | |
3dfb57a3 | 699 | /* |
62840030 MA |
700 | * If we can tell the caller to execute this parent next, do |
701 | * so. Otherwise dispatch the parent zio as its own task. | |
702 | * | |
703 | * Having the caller execute the parent when possible reduces | |
704 | * locking on the zio taskqs, reduces context switch | |
705 | * overhead, and has no recursion penalty. Note that one | |
706 | * read from disk typically causes at least 3 zios: a | |
707 | * zio_null(), the logical zio_read(), and then a physical | |
708 | * zio. When the physical zio completes, we are able to call | |
709 | * zio_done() on all 3 of these zios from one invocation of | |
710 | * zio_execute() by returning the parent back to | |
711 | * zio_execute(). Since the parent isn't executed until this | |
712 | * thread returns back to zio_execute(), the caller should do | |
713 | * so promptly. | |
714 | * | |
715 | * In other cases, dispatching the parent prevents | |
716 | * overflowing the stack when we have deeply nested | |
717 | * parent-child relationships, as we do with the "mega zio" | |
718 | * of writes for spa_sync(), and the chain of ZIL blocks. | |
3dfb57a3 | 719 | */ |
62840030 MA |
720 | if (next_to_executep != NULL && *next_to_executep == NULL) { |
721 | *next_to_executep = pio; | |
722 | } else { | |
723 | zio_taskq_dispatch(pio, type, B_FALSE); | |
724 | } | |
b128c09f BB |
725 | } else { |
726 | mutex_exit(&pio->io_lock); | |
34dc7c2f BB |
727 | } |
728 | } | |
729 | ||
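The hand-back-the-parent optimization described in the comment above relies on the caller looping rather than recursing. A heavily simplified, hypothetical sketch of that calling pattern (the real loop lives in zio_execute()):

```c
/* Hypothetical sketch: run a completed parent in this thread, no dispatch. */
typedef void (*example_stage_fn_t)(zio_t *zio, zio_t **next_to_executep);

static void
example_execute_chain(zio_t *zio, example_stage_fn_t stage)
{
	while (zio != NULL) {
		zio_t *next = NULL;
		stage(zio, &next);	/* may hand back a now-ready parent */
		zio = next;		/* no recursion, no taskq round trip */
	}
}
```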
b128c09f BB |
730 | static void |
731 | zio_inherit_child_errors(zio_t *zio, enum zio_child c) | |
732 | { | |
733 | if (zio->io_child_error[c] != 0 && zio->io_error == 0) | |
734 | zio->io_error = zio->io_child_error[c]; | |
735 | } | |
736 | ||
3dfb57a3 | 737 | int |
64fc7762 | 738 | zio_bookmark_compare(const void *x1, const void *x2) |
3dfb57a3 DB |
739 | { |
740 | const zio_t *z1 = x1; | |
741 | const zio_t *z2 = x2; | |
3dfb57a3 | 742 | |
64fc7762 MA |
743 | if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset) |
744 | return (-1); | |
745 | if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset) | |
746 | return (1); | |
3dfb57a3 | 747 | |
64fc7762 MA |
748 | if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object) |
749 | return (-1); | |
750 | if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object) | |
751 | return (1); | |
3dfb57a3 | 752 | |
64fc7762 MA |
753 | if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level) |
754 | return (-1); | |
755 | if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level) | |
756 | return (1); | |
757 | ||
758 | if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid) | |
759 | return (-1); | |
760 | if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid) | |
761 | return (1); | |
762 | ||
763 | if (z1 < z2) | |
764 | return (-1); | |
765 | if (z1 > z2) | |
766 | return (1); | |
767 | ||
768 | return (0); | |
3dfb57a3 DB |
769 | } |
770 | ||
34dc7c2f BB |
771 | /* |
772 | * ========================================================================== | |
b128c09f | 773 | * Create the various types of I/O (read, write, free, etc) |
34dc7c2f BB |
774 | * ========================================================================== |
775 | */ | |
776 | static zio_t * | |
428870ff | 777 | zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, |
a6255b7f | 778 | abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done, |
2aa34383 DK |
779 | void *private, zio_type_t type, zio_priority_t priority, |
780 | enum zio_flag flags, vdev_t *vd, uint64_t offset, | |
781 | const zbookmark_phys_t *zb, enum zio_stage stage, | |
782 | enum zio_stage pipeline) | |
34dc7c2f BB |
783 | { |
784 | zio_t *zio; | |
785 | ||
1b939560 | 786 | IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE); |
2aa34383 | 787 | ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0); |
b128c09f BB |
788 | ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0); |
789 | ||
790 | ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER)); | |
791 | ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER)); | |
792 | ASSERT(vd || stage == ZIO_STAGE_OPEN); | |
34dc7c2f | 793 | |
b5256303 | 794 | IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0); |
2aa34383 | 795 | |
79c76d5b | 796 | zio = kmem_cache_alloc(zio_cache, KM_SLEEP); |
3941503c BB |
797 | bzero(zio, sizeof (zio_t)); |
798 | ||
448d7aaa | 799 | mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL); |
3941503c BB |
800 | cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL); |
801 | ||
802 | list_create(&zio->io_parent_list, sizeof (zio_link_t), | |
803 | offsetof(zio_link_t, zl_parent_node)); | |
804 | list_create(&zio->io_child_list, sizeof (zio_link_t), | |
805 | offsetof(zio_link_t, zl_child_node)); | |
4e21fd06 | 806 | metaslab_trace_init(&zio->io_alloc_list); |
d164b209 | 807 | |
b128c09f BB |
808 | if (vd != NULL) |
809 | zio->io_child_type = ZIO_CHILD_VDEV; | |
810 | else if (flags & ZIO_FLAG_GANG_CHILD) | |
811 | zio->io_child_type = ZIO_CHILD_GANG; | |
428870ff BB |
812 | else if (flags & ZIO_FLAG_DDT_CHILD) |
813 | zio->io_child_type = ZIO_CHILD_DDT; | |
b128c09f BB |
814 | else |
815 | zio->io_child_type = ZIO_CHILD_LOGICAL; | |
816 | ||
34dc7c2f | 817 | if (bp != NULL) { |
428870ff | 818 | zio->io_bp = (blkptr_t *)bp; |
34dc7c2f BB |
819 | zio->io_bp_copy = *bp; |
820 | zio->io_bp_orig = *bp; | |
428870ff BB |
821 | if (type != ZIO_TYPE_WRITE || |
822 | zio->io_child_type == ZIO_CHILD_DDT) | |
b128c09f | 823 | zio->io_bp = &zio->io_bp_copy; /* so caller can free */ |
9babb374 | 824 | if (zio->io_child_type == ZIO_CHILD_LOGICAL) |
b128c09f | 825 | zio->io_logical = zio; |
9babb374 BB |
826 | if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp)) |
827 | pipeline |= ZIO_GANG_STAGES; | |
34dc7c2f | 828 | } |
b128c09f BB |
829 | |
830 | zio->io_spa = spa; | |
831 | zio->io_txg = txg; | |
34dc7c2f BB |
832 | zio->io_done = done; |
833 | zio->io_private = private; | |
834 | zio->io_type = type; | |
835 | zio->io_priority = priority; | |
b128c09f BB |
836 | zio->io_vd = vd; |
837 | zio->io_offset = offset; | |
a6255b7f | 838 | zio->io_orig_abd = zio->io_abd = data; |
2aa34383 DK |
839 | zio->io_orig_size = zio->io_size = psize; |
840 | zio->io_lsize = lsize; | |
b128c09f BB |
841 | zio->io_orig_flags = zio->io_flags = flags; |
842 | zio->io_orig_stage = zio->io_stage = stage; | |
843 | zio->io_orig_pipeline = zio->io_pipeline = pipeline; | |
3dfb57a3 | 844 | zio->io_pipeline_trace = ZIO_STAGE_OPEN; |
34dc7c2f | 845 | |
d164b209 BB |
846 | zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY); |
847 | zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE); | |
848 | ||
b128c09f BB |
849 | if (zb != NULL) |
850 | zio->io_bookmark = *zb; | |
851 | ||
852 | if (pio != NULL) { | |
cc99f275 DB |
853 | if (zio->io_metaslab_class == NULL) |
854 | zio->io_metaslab_class = pio->io_metaslab_class; | |
b128c09f | 855 | if (zio->io_logical == NULL) |
34dc7c2f | 856 | zio->io_logical = pio->io_logical; |
9babb374 BB |
857 | if (zio->io_child_type == ZIO_CHILD_GANG) |
858 | zio->io_gang_leader = pio->io_gang_leader; | |
b128c09f | 859 | zio_add_child(pio, zio); |
34dc7c2f BB |
860 | } |
861 | ||
a38718a6 GA |
862 | taskq_init_ent(&zio->io_tqent); |
863 | ||
34dc7c2f BB |
864 | return (zio); |
865 | } | |
866 | ||
867 | static void | |
b128c09f | 868 | zio_destroy(zio_t *zio) |
34dc7c2f | 869 | { |
4e21fd06 | 870 | metaslab_trace_fini(&zio->io_alloc_list); |
3941503c BB |
871 | list_destroy(&zio->io_parent_list); |
872 | list_destroy(&zio->io_child_list); | |
873 | mutex_destroy(&zio->io_lock); | |
874 | cv_destroy(&zio->io_cv); | |
b128c09f | 875 | kmem_cache_free(zio_cache, zio); |
34dc7c2f BB |
876 | } |
877 | ||
878 | zio_t * | |
d164b209 | 879 | zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done, |
428870ff | 880 | void *private, enum zio_flag flags) |
34dc7c2f BB |
881 | { |
882 | zio_t *zio; | |
883 | ||
2aa34383 | 884 | zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private, |
d164b209 | 885 | ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL, |
b128c09f | 886 | ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE); |
34dc7c2f BB |
887 | |
888 | return (zio); | |
889 | } | |
890 | ||
891 | zio_t * | |
428870ff | 892 | zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags) |
34dc7c2f | 893 | { |
d164b209 | 894 | return (zio_null(NULL, spa, NULL, done, private, flags)); |
34dc7c2f BB |
895 | } |
896 | ||
bc67cba7 PZ |
897 | static int |
898 | zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp, | |
899 | enum blk_verify_flag blk_verify, const char *fmt, ...) | |
900 | { | |
901 | va_list adx; | |
902 | char buf[256]; | |
903 | ||
904 | va_start(adx, fmt); | |
905 | (void) vsnprintf(buf, sizeof (buf), fmt, adx); | |
906 | va_end(adx); | |
907 | ||
908 | switch (blk_verify) { | |
909 | case BLK_VERIFY_HALT: | |
f49db9b5 | 910 | dprintf_bp(bp, "blkptr at %p dprintf_bp():", bp); |
bc67cba7 PZ |
911 | zfs_panic_recover("%s: %s", spa_name(spa), buf); |
912 | break; | |
913 | case BLK_VERIFY_LOG: | |
914 | zfs_dbgmsg("%s: %s", spa_name(spa), buf); | |
915 | break; | |
916 | case BLK_VERIFY_ONLY: | |
917 | break; | |
918 | } | |
919 | ||
920 | return (1); | |
921 | } | |
922 | ||
923 | /* | |
924 | * Verify the block pointer fields contain reasonable values. This means | |
925 | * it only contains known object types, checksum/compression identifiers, | |
926 | * block sizes within the maximum allowed limits, valid DVAs, etc. | |
927 | * | |
928 | * If everything checks out, B_TRUE is returned. The blk_verify | |
929 | * argument controls the behavior when an invalid field is detected. | |
930 | * | |
931 | * Modes for zfs_blkptr_verify: | |
932 | * 1) BLK_VERIFY_ONLY (evaluate the block) | |
933 | * 2) BLK_VERIFY_LOG (evaluate the block and log problems) | |
934 | * 3) BLK_VERIFY_HALT (call zfs_panic_recover on error) | |
935 | */ | |
936 | boolean_t | |
937 | zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp, boolean_t config_held, | |
938 | enum blk_verify_flag blk_verify) | |
63e3a861 | 939 | { |
bc67cba7 PZ |
940 | int errors = 0; |
941 | ||
63e3a861 | 942 | if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) { |
bc67cba7 PZ |
943 | errors += zfs_blkptr_verify_log(spa, bp, blk_verify, |
944 | "blkptr at %p has invalid TYPE %llu", | |
63e3a861 MA |
945 | bp, (longlong_t)BP_GET_TYPE(bp)); |
946 | } | |
947 | if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS || | |
948 | BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) { | |
bc67cba7 PZ |
949 | errors += zfs_blkptr_verify_log(spa, bp, blk_verify, |
950 | "blkptr at %p has invalid CHECKSUM %llu", | |
63e3a861 MA |
951 | bp, (longlong_t)BP_GET_CHECKSUM(bp)); |
952 | } | |
953 | if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS || | |
954 | BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) { | |
bc67cba7 PZ |
955 | errors += zfs_blkptr_verify_log(spa, bp, blk_verify, |
956 | "blkptr at %p has invalid COMPRESS %llu", | |
63e3a861 MA |
957 | bp, (longlong_t)BP_GET_COMPRESS(bp)); |
958 | } | |
959 | if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) { | |
bc67cba7 PZ |
960 | errors += zfs_blkptr_verify_log(spa, bp, blk_verify, |
961 | "blkptr at %p has invalid LSIZE %llu", | |
63e3a861 MA |
962 | bp, (longlong_t)BP_GET_LSIZE(bp)); |
963 | } | |
964 | if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) { | |
bc67cba7 PZ |
965 | errors += zfs_blkptr_verify_log(spa, bp, blk_verify, |
966 | "blkptr at %p has invalid PSIZE %llu", | |
63e3a861 MA |
967 | bp, (longlong_t)BP_GET_PSIZE(bp)); |
968 | } | |
969 | ||
970 | if (BP_IS_EMBEDDED(bp)) { | |
746d4a45 | 971 | if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) { |
bc67cba7 PZ |
972 | errors += zfs_blkptr_verify_log(spa, bp, blk_verify, |
973 | "blkptr at %p has invalid ETYPE %llu", | |
63e3a861 MA |
974 | bp, (longlong_t)BPE_GET_ETYPE(bp)); |
975 | } | |
976 | } | |
977 | ||
6cb8e530 PZ |
978 | /* |
979 | * Do not verify individual DVAs if the config is not trusted. This | |
980 | * will be done once the zio is executed in vdev_mirror_map_alloc. | |
981 | */ | |
982 | if (!spa->spa_trust_config) | |
bc67cba7 | 983 | return (B_TRUE); |
6cb8e530 | 984 | |
dc04a8c7 PD |
985 | if (!config_held) |
986 | spa_config_enter(spa, SCL_VDEV, bp, RW_READER); | |
987 | else | |
988 | ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER)); | |
63e3a861 MA |
989 | /* |
990 | * Pool-specific checks. | |
991 | * | |
992 | * Note: it would be nice to verify that the blk_birth and | |
993 | * BP_PHYSICAL_BIRTH() are not too large. However, spa_freeze() | |
994 | * allows the birth time of log blocks (and dmu_sync()-ed blocks | |
995 | * that are in the log) to be arbitrarily large. | |
996 | */ | |
1c27024e | 997 | for (int i = 0; i < BP_GET_NDVAS(bp); i++) { |
2b56a634 MA |
998 | const dva_t *dva = &bp->blk_dva[i]; |
999 | uint64_t vdevid = DVA_GET_VDEV(dva); | |
1c27024e | 1000 | |
63e3a861 | 1001 | if (vdevid >= spa->spa_root_vdev->vdev_children) { |
bc67cba7 PZ |
1002 | errors += zfs_blkptr_verify_log(spa, bp, blk_verify, |
1003 | "blkptr at %p DVA %u has invalid VDEV %llu", | |
63e3a861 | 1004 | bp, i, (longlong_t)vdevid); |
ee3a23b8 | 1005 | continue; |
63e3a861 | 1006 | } |
1c27024e | 1007 | vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid]; |
63e3a861 | 1008 | if (vd == NULL) { |
bc67cba7 PZ |
1009 | errors += zfs_blkptr_verify_log(spa, bp, blk_verify, |
1010 | "blkptr at %p DVA %u has invalid VDEV %llu", | |
63e3a861 | 1011 | bp, i, (longlong_t)vdevid); |
ee3a23b8 | 1012 | continue; |
63e3a861 MA |
1013 | } |
1014 | if (vd->vdev_ops == &vdev_hole_ops) { | |
bc67cba7 PZ |
1015 | errors += zfs_blkptr_verify_log(spa, bp, blk_verify, |
1016 | "blkptr at %p DVA %u has hole VDEV %llu", | |
63e3a861 | 1017 | bp, i, (longlong_t)vdevid); |
ee3a23b8 | 1018 | continue; |
63e3a861 MA |
1019 | } |
1020 | if (vd->vdev_ops == &vdev_missing_ops) { | |
1021 | /* | |
1022 | * "missing" vdevs are valid during import, but we | |
1023 | * don't have their detailed info (e.g. asize), so | |
1024 | * we can't perform any more checks on them. | |
1025 | */ | |
1026 | continue; | |
1027 | } | |
2b56a634 MA |
1028 | uint64_t offset = DVA_GET_OFFSET(dva); |
1029 | uint64_t asize = DVA_GET_ASIZE(dva); | |
1030 | if (DVA_GET_GANG(dva)) | |
1031 | asize = vdev_gang_header_asize(vd); | |
63e3a861 | 1032 | if (offset + asize > vd->vdev_asize) { |
bc67cba7 PZ |
1033 | errors += zfs_blkptr_verify_log(spa, bp, blk_verify, |
1034 | "blkptr at %p DVA %u has invalid OFFSET %llu", | |
63e3a861 MA |
1035 | bp, i, (longlong_t)offset); |
1036 | } | |
1037 | } | |
f49db9b5 BB |
1038 | if (errors > 0) |
1039 | dprintf_bp(bp, "blkptr at %p dprintf_bp():", bp); | |
dc04a8c7 PD |
1040 | if (!config_held) |
1041 | spa_config_exit(spa, SCL_VDEV, bp); | |
bc67cba7 PZ |
1042 | |
1043 | return (errors == 0); | |
63e3a861 MA |
1044 | } |
1045 | ||
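A short usage sketch of the three verification modes listed in the comment above (hypothetical call sites; within this file, zio_read(), zio_free(), and zio_claim() use BLK_VERIFY_HALT):

```c
/* Hypothetical helper exercising the three verification modes. */
static void
example_verify_modes(spa_t *spa, const blkptr_t *bp)
{
	/* Quietly evaluate the block pointer. */
	boolean_t ok = zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_ONLY);

	/* Evaluate again, logging each invalid field via zfs_dbgmsg(). */
	if (!ok)
		(void) zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_LOG);

	/* Treat any invalid field as fatal (zfs_panic_recover). */
	(void) zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_HALT);
}
```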
6cb8e530 PZ |
1046 | boolean_t |
1047 | zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp) | |
1048 | { | |
1049 | uint64_t vdevid = DVA_GET_VDEV(dva); | |
1050 | ||
1051 | if (vdevid >= spa->spa_root_vdev->vdev_children) | |
1052 | return (B_FALSE); | |
1053 | ||
1054 | vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid]; | |
1055 | if (vd == NULL) | |
1056 | return (B_FALSE); | |
1057 | ||
1058 | if (vd->vdev_ops == &vdev_hole_ops) | |
1059 | return (B_FALSE); | |
1060 | ||
1061 | if (vd->vdev_ops == &vdev_missing_ops) { | |
1062 | return (B_FALSE); | |
1063 | } | |
1064 | ||
1065 | uint64_t offset = DVA_GET_OFFSET(dva); | |
1066 | uint64_t asize = DVA_GET_ASIZE(dva); | |
1067 | ||
2b56a634 MA |
1068 | if (DVA_GET_GANG(dva)) |
1069 | asize = vdev_gang_header_asize(vd); | |
6cb8e530 PZ |
1070 | if (offset + asize > vd->vdev_asize) |
1071 | return (B_FALSE); | |
1072 | ||
1073 | return (B_TRUE); | |
1074 | } | |
1075 | ||
34dc7c2f | 1076 | zio_t * |
b128c09f | 1077 | zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, |
a6255b7f | 1078 | abd_t *data, uint64_t size, zio_done_func_t *done, void *private, |
5dbd68a3 | 1079 | zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb) |
34dc7c2f BB |
1080 | { |
1081 | zio_t *zio; | |
1082 | ||
bc67cba7 PZ |
1083 | (void) zfs_blkptr_verify(spa, bp, flags & ZIO_FLAG_CONFIG_WRITER, |
1084 | BLK_VERIFY_HALT); | |
63e3a861 | 1085 | |
428870ff | 1086 | zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp, |
2aa34383 | 1087 | data, size, size, done, private, |
b128c09f | 1088 | ZIO_TYPE_READ, priority, flags, NULL, 0, zb, |
428870ff BB |
1089 | ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ? |
1090 | ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE); | |
34dc7c2f | 1091 | |
b128c09f BB |
1092 | return (zio); |
1093 | } | |
34dc7c2f | 1094 | |
34dc7c2f | 1095 | zio_t * |
b128c09f | 1096 | zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, |
a6255b7f | 1097 | abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp, |
bc77ba73 PD |
1098 | zio_done_func_t *ready, zio_done_func_t *children_ready, |
1099 | zio_done_func_t *physdone, zio_done_func_t *done, | |
1100 | void *private, zio_priority_t priority, enum zio_flag flags, | |
1101 | const zbookmark_phys_t *zb) | |
34dc7c2f BB |
1102 | { |
1103 | zio_t *zio; | |
1104 | ||
b128c09f BB |
1105 | ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF && |
1106 | zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS && | |
1107 | zp->zp_compress >= ZIO_COMPRESS_OFF && | |
1108 | zp->zp_compress < ZIO_COMPRESS_FUNCTIONS && | |
9ae529ec | 1109 | DMU_OT_IS_VALID(zp->zp_type) && |
b128c09f | 1110 | zp->zp_level < 32 && |
428870ff | 1111 | zp->zp_copies > 0 && |
03c6040b | 1112 | zp->zp_copies <= spa_max_replication(spa)); |
34dc7c2f | 1113 | |
2aa34383 | 1114 | zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private, |
b128c09f | 1115 | ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb, |
428870ff BB |
1116 | ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ? |
1117 | ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE); | |
34dc7c2f BB |
1118 | |
1119 | zio->io_ready = ready; | |
bc77ba73 | 1120 | zio->io_children_ready = children_ready; |
e8b96c60 | 1121 | zio->io_physdone = physdone; |
b128c09f | 1122 | zio->io_prop = *zp; |
34dc7c2f | 1123 | |
9b67f605 MA |
1124 | /* |
1125 | * Data can be NULL if we are going to call zio_write_override() to | |
1126 | * provide the already-allocated BP. But we may need the data to | |
1127 | * verify a dedup hit (if requested). In this case, don't try to | |
b5256303 TC |
1128 | * dedup (just take the already-allocated BP verbatim). Encrypted |
1129 | * dedup blocks need data as well, so we also disable dedup in this | |
1130 | * case. | |
9b67f605 | 1131 | */ |
b5256303 TC |
1132 | if (data == NULL && |
1133 | (zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) { | |
9b67f605 MA |
1134 | zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE; |
1135 | } | |
1136 | ||
34dc7c2f BB |
1137 | return (zio); |
1138 | } | |
1139 | ||
1140 | zio_t * | |
a6255b7f | 1141 | zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data, |
e8b96c60 | 1142 | uint64_t size, zio_done_func_t *done, void *private, |
5dbd68a3 | 1143 | zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb) |
34dc7c2f BB |
1144 | { |
1145 | zio_t *zio; | |
1146 | ||
2aa34383 | 1147 | zio = zio_create(pio, spa, txg, bp, data, size, size, done, private, |
3dfb57a3 | 1148 | ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb, |
b128c09f | 1149 | ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE); |
34dc7c2f BB |
1150 | |
1151 | return (zio); | |
1152 | } | |
1153 | ||
428870ff | 1154 | void |
03c6040b | 1155 | zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite) |
428870ff BB |
1156 | { |
1157 | ASSERT(zio->io_type == ZIO_TYPE_WRITE); | |
1158 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
1159 | ASSERT(zio->io_stage == ZIO_STAGE_OPEN); | |
1160 | ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa)); | |
1161 | ||
03c6040b GW |
1162 | /* |
1163 | * We must reset the io_prop to match the values that existed | |
1164 | * when the bp was first written by dmu_sync() keeping in mind | |
1165 | * that nopwrite and dedup are mutually exclusive. | |
1166 | */ | |
1167 | zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup; | |
1168 | zio->io_prop.zp_nopwrite = nopwrite; | |
428870ff BB |
1169 | zio->io_prop.zp_copies = copies; |
1170 | zio->io_bp_override = bp; | |
1171 | } | |
1172 | ||
1173 | void | |
1174 | zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp) | |
1175 | { | |
9b67f605 | 1176 | |
bc67cba7 | 1177 | (void) zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_HALT); |
a1d477c2 | 1178 | |
9b67f605 MA |
1179 | /* |
1180 | * The check for EMBEDDED is a performance optimization. We | |
1181 | * process the free here (by ignoring it) rather than | |
1182 | * putting it on the list and then processing it in zio_free_sync(). | |
1183 | */ | |
1184 | if (BP_IS_EMBEDDED(bp)) | |
1185 | return; | |
13fe0198 | 1186 | metaslab_check_free(spa, bp); |
2883cad5 MA |
1187 | |
1188 | /* | |
1189 | * Frees that are for the currently-syncing txg, are not going to be | |
1190 | * deferred, and which will not need to do a read (i.e. not GANG or | |
1191 | * DEDUP), can be processed immediately. Otherwise, put them on the | |
1192 | * in-memory list for later processing. | |
93e28d66 SD |
1193 | * |
1194 | * Note that we only defer frees after zfs_sync_pass_deferred_free | |
1195 | * when the log space map feature is disabled. [see relevant comment | |
1196 | * in spa_sync_iterate_to_convergence()] | |
2883cad5 | 1197 | */ |
93e28d66 SD |
1198 | if (BP_IS_GANG(bp) || |
1199 | BP_GET_DEDUP(bp) || | |
2883cad5 | 1200 | txg != spa->spa_syncing_txg || |
93e28d66 SD |
1201 | (spa_sync_pass(spa) >= zfs_sync_pass_deferred_free && |
1202 | !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))) { | |
2883cad5 MA |
1203 | bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp); |
1204 | } else { | |
9cdf7b1f | 1205 | VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL); |
2883cad5 | 1206 | } |
428870ff BB |
1207 | } |
1208 | ||
9cdf7b1f MA |
1209 | /* |
1210 | * To improve performance, this function may return NULL if we were able | |
1211 | * to do the free immediately. This avoids the cost of creating a zio | |
1212 | * (and linking it to the parent, etc). | |
1213 | */ | |
34dc7c2f | 1214 | zio_t * |
428870ff BB |
1215 | zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, |
1216 | enum zio_flag flags) | |
34dc7c2f | 1217 | { |
428870ff BB |
1218 | ASSERT(!BP_IS_HOLE(bp)); |
1219 | ASSERT(spa_syncing_txg(spa) == txg); | |
34dc7c2f | 1220 | |
9b67f605 | 1221 | if (BP_IS_EMBEDDED(bp)) |
9cdf7b1f | 1222 | return (NULL); |
9b67f605 | 1223 | |
13fe0198 | 1224 | metaslab_check_free(spa, bp); |
8c841793 | 1225 | arc_freed(spa, bp); |
d4a72f23 | 1226 | dsl_scan_freed(spa, bp); |
13fe0198 | 1227 | |
9cdf7b1f MA |
1228 | if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp)) { |
1229 | /* | |
1230 | * GANG and DEDUP blocks can induce a read (for the gang block | |
1231 | * header, or the DDT), so issue them asynchronously so that | |
1232 | * this thread is not tied up. | |
1233 | */ | |
1234 | enum zio_stage stage = | |
1235 | ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC; | |
2883cad5 | 1236 | |
9cdf7b1f MA |
1237 | return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp), |
1238 | BP_GET_PSIZE(bp), NULL, NULL, | |
1239 | ZIO_TYPE_FREE, ZIO_PRIORITY_NOW, | |
1240 | flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage)); | |
1241 | } else { | |
1242 | metaslab_free(spa, bp, txg, B_FALSE); | |
1243 | return (NULL); | |
1244 | } | |
34dc7c2f BB |
1245 | } |
1246 | ||
1247 | zio_t * | |
428870ff BB |
1248 | zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, |
1249 | zio_done_func_t *done, void *private, enum zio_flag flags) | |
34dc7c2f BB |
1250 | { |
1251 | zio_t *zio; | |
1252 | ||
bc67cba7 PZ |
1253 | (void) zfs_blkptr_verify(spa, bp, flags & ZIO_FLAG_CONFIG_WRITER, |
1254 | BLK_VERIFY_HALT); | |
9b67f605 MA |
1255 | |
1256 | if (BP_IS_EMBEDDED(bp)) | |
1257 | return (zio_null(pio, spa, NULL, NULL, NULL, 0)); | |
1258 | ||
34dc7c2f BB |
1259 | /* |
1260 | * A claim is an allocation of a specific block. Claims are needed | |
1261 | * to support immediate writes in the intent log. The issue is that | |
1262 | * immediate writes contain committed data, but in a txg that was | |
1263 | * *not* committed. Upon opening the pool after an unclean shutdown, | |
1264 | * the intent log claims all blocks that contain immediate write data | |
1265 | * so that the SPA knows they're in use. | |
1266 | * | |
1267 | * All claims *must* be resolved in the first txg -- before the SPA | |
1268 | * starts allocating blocks -- so that nothing is allocated twice. | |
428870ff | 1269 | * If txg == 0 we just verify that the block is claimable. |
34dc7c2f | 1270 | */ |
d2734cce SD |
1271 | ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, |
1272 | spa_min_claim_txg(spa)); | |
1273 | ASSERT(txg == spa_min_claim_txg(spa) || txg == 0); | |
76d04993 | 1274 | ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(8) */ |
34dc7c2f | 1275 | |
b128c09f | 1276 | zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp), |
2aa34383 DK |
1277 | BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, |
1278 | flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE); | |
3dfb57a3 | 1279 | ASSERT0(zio->io_queued_timestamp); |
34dc7c2f BB |
1280 | |
1281 | return (zio); | |
1282 | } | |
1283 | ||
1284 | zio_t * | |
1285 | zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd, | |
e8b96c60 | 1286 | zio_done_func_t *done, void *private, enum zio_flag flags) |
34dc7c2f BB |
1287 | { |
1288 | zio_t *zio; | |
1289 | int c; | |
1290 | ||
1291 | if (vd->vdev_children == 0) { | |
2aa34383 | 1292 | zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private, |
e8b96c60 | 1293 | ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL, |
34dc7c2f BB |
1294 | ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE); |
1295 | ||
34dc7c2f BB |
1296 | zio->io_cmd = cmd; |
1297 | } else { | |
d164b209 | 1298 | zio = zio_null(pio, spa, NULL, NULL, NULL, flags); |
34dc7c2f BB |
1299 | |
1300 | for (c = 0; c < vd->vdev_children; c++) | |
1301 | zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd, | |
e8b96c60 | 1302 | done, private, flags)); |
34dc7c2f BB |
1303 | } |
1304 | ||
1305 | return (zio); | |
1306 | } | |
1307 | ||
1b939560 BB |
1308 | zio_t * |
1309 | zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, | |
1310 | zio_done_func_t *done, void *private, zio_priority_t priority, | |
1311 | enum zio_flag flags, enum trim_flag trim_flags) | |
1312 | { | |
1313 | zio_t *zio; | |
1314 | ||
1315 | ASSERT0(vd->vdev_children); | |
1316 | ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); | |
1317 | ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift)); | |
1318 | ASSERT3U(size, !=, 0); | |
1319 | ||
1320 | zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done, | |
1321 | private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL, | |
1322 | vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE); | |
1323 | zio->io_trim_flags = trim_flags; | |
1324 | ||
1325 | return (zio); | |
1326 | } | |
1327 | ||
34dc7c2f BB |
1328 | zio_t * |
1329 | zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, | |
a6255b7f | 1330 | abd_t *data, int checksum, zio_done_func_t *done, void *private, |
e8b96c60 | 1331 | zio_priority_t priority, enum zio_flag flags, boolean_t labels) |
34dc7c2f BB |
1332 | { |
1333 | zio_t *zio; | |
34dc7c2f | 1334 | |
b128c09f BB |
1335 | ASSERT(vd->vdev_children == 0); |
1336 | ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || | |
1337 | offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); | |
1338 | ASSERT3U(offset + size, <=, vd->vdev_psize); | |
34dc7c2f | 1339 | |
2aa34383 DK |
1340 | zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done, |
1341 | private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd, | |
1342 | offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE); | |
34dc7c2f | 1343 | |
b128c09f | 1344 | zio->io_prop.zp_checksum = checksum; |
34dc7c2f BB |
1345 | |
1346 | return (zio); | |
1347 | } | |
1348 | ||
1349 | zio_t * | |
1350 | zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, | |
a6255b7f | 1351 | abd_t *data, int checksum, zio_done_func_t *done, void *private, |
e8b96c60 | 1352 | zio_priority_t priority, enum zio_flag flags, boolean_t labels) |
34dc7c2f | 1353 | { |
34dc7c2f | 1354 | zio_t *zio; |
34dc7c2f | 1355 | |
b128c09f BB |
1356 | ASSERT(vd->vdev_children == 0); |
1357 | ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || | |
1358 | offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); | |
1359 | ASSERT3U(offset + size, <=, vd->vdev_psize); | |
34dc7c2f | 1360 | |
2aa34383 DK |
1361 | zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done, |
1362 | private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd, | |
1363 | offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE); | |
34dc7c2f | 1364 | |
b128c09f | 1365 | zio->io_prop.zp_checksum = checksum; |
34dc7c2f | 1366 | |
3c67d83a | 1367 | if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) { |
34dc7c2f | 1368 | /* |
428870ff | 1369 | * zec checksums are necessarily destructive -- they modify |
b128c09f | 1370 | * the end of the write buffer to hold the verifier/checksum. |
34dc7c2f | 1371 | * Therefore, we must make a local copy in case the data is |
b128c09f | 1372 | * being written to multiple places in parallel. |
34dc7c2f | 1373 | */ |
a6255b7f DQ |
1374 | abd_t *wbuf = abd_alloc_sametype(data, size); |
1375 | abd_copy(wbuf, data, size); | |
1376 | ||
b128c09f | 1377 | zio_push_transform(zio, wbuf, size, size, NULL); |
34dc7c2f BB |
1378 | } |
1379 | ||
1380 | return (zio); | |
1381 | } | |
1382 | ||
1383 | /* | |
b128c09f | 1384 | * Create a child I/O to do some work for us. |
34dc7c2f BB |
1385 | */ |
1386 | zio_t * | |
b128c09f | 1387 | zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset, |
4ea3f864 GM |
1388 | abd_t *data, uint64_t size, int type, zio_priority_t priority, |
1389 | enum zio_flag flags, zio_done_func_t *done, void *private) | |
34dc7c2f | 1390 | { |
428870ff | 1391 | enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE; |
b128c09f BB |
1392 | zio_t *zio; |
1393 | ||
a1d477c2 MA |
1394 | /* |
1395 | * vdev child I/Os do not propagate their error to the parent. | |
1396 | * Therefore, for correct operation the caller *must* check for | |
1397 | * and handle the error in the child i/o's done callback. | |
1398 | * The only exceptions are i/os that we don't care about | |
1399 | * (OPTIONAL or REPAIR). | |
1400 | */ | |
1401 | ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) || | |
1402 | done != NULL); | |
1403 | ||
34dc7c2f BB |
1404 | if (type == ZIO_TYPE_READ && bp != NULL) { |
1405 | /* | |
1406 | * If we have the bp, then the child should perform the | |
1407 | * checksum and the parent need not. This pushes error | |
1408 | * detection as close to the leaves as possible and | |
1409 | * eliminates redundant checksums in the interior nodes. | |
1410 | */ | |
428870ff BB |
1411 | pipeline |= ZIO_STAGE_CHECKSUM_VERIFY; |
1412 | pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; | |
34dc7c2f BB |
1413 | } |
1414 | ||
a1d477c2 MA |
1415 | if (vd->vdev_ops->vdev_op_leaf) { |
1416 | ASSERT0(vd->vdev_children); | |
b128c09f | 1417 | offset += VDEV_LABEL_START_SIZE; |
a1d477c2 | 1418 | } |
b128c09f | 1419 | |
a1d477c2 | 1420 | flags |= ZIO_VDEV_CHILD_FLAGS(pio); |
428870ff BB |
1421 | |
1422 | /* | |
1423 | * If we've decided to do a repair, the write is not speculative -- | |
1424 | * even if the original read was. | |
1425 | */ | |
1426 | if (flags & ZIO_FLAG_IO_REPAIR) | |
1427 | flags &= ~ZIO_FLAG_SPECULATIVE; | |
1428 | ||
3dfb57a3 DB |
1429 | /* |
1430 | * If we're creating a child I/O that is not associated with a | |
1431 | * top-level vdev, then the child zio is not an allocating I/O. | |
1432 | * If this is a retried I/O then we ignore it since we will | |
1433 | * have already processed the original allocating I/O. | |
1434 | */ | |
1435 | if (flags & ZIO_FLAG_IO_ALLOCATING && | |
1436 | (vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) { | |
cc99f275 DB |
1437 | ASSERT(pio->io_metaslab_class != NULL); |
1438 | ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled); | |
3dfb57a3 DB |
1439 | ASSERT(type == ZIO_TYPE_WRITE); |
1440 | ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE); | |
1441 | ASSERT(!(flags & ZIO_FLAG_IO_REPAIR)); | |
1442 | ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) || | |
1443 | pio->io_child_type == ZIO_CHILD_GANG); | |
1444 | ||
1445 | flags &= ~ZIO_FLAG_IO_ALLOCATING; | |
1446 | } | |
1447 | ||
1448 | ||
2aa34383 | 1449 | zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size, |
428870ff BB |
1450 | done, private, type, priority, flags, vd, offset, &pio->io_bookmark, |
1451 | ZIO_STAGE_VDEV_IO_START >> 1, pipeline); | |
3dfb57a3 | 1452 | ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); |
34dc7c2f | 1453 | |
e8b96c60 MA |
1454 | zio->io_physdone = pio->io_physdone; |
1455 | if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL) | |
1456 | zio->io_logical->io_phys_children++; | |
1457 | ||
b128c09f | 1458 | return (zio); |
34dc7c2f BB |
1459 | } |
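/*
 * Illustrative sketch (not part of the original source, assumes this
 * file's headers): because vdev child I/Os do not propagate io_error to
 * the parent, the comment above zio_vdev_child_io() requires callers to
 * inspect zio->io_error in their done callback.  The function names here
 * (my_child_done, my_issue_child) are hypothetical; real examples live
 * in vdev_mirror.c and vdev_raidz.c.
 */
static void
my_child_done(zio_t *zio)
{
        zio_t *pio = zio->io_private;   /* parent stashed at create time */

        if (zio->io_error != 0) {
                /* Record the failure on the parent ourselves. */
                mutex_enter(&pio->io_lock);
                pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
                mutex_exit(&pio->io_lock);
        }
}

static void
my_issue_child(zio_t *pio, vdev_t *vd, uint64_t offset, abd_t *abd,
    uint64_t size)
{
        zio_nowait(zio_vdev_child_io(pio, NULL, vd, offset, abd, size,
            ZIO_TYPE_READ, ZIO_PRIORITY_ASYNC_READ, 0,
            my_child_done, pio));
}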
1460 | ||
b128c09f | 1461 | zio_t * |
a6255b7f | 1462 | zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size, |
9e052db4 | 1463 | zio_type_t type, zio_priority_t priority, enum zio_flag flags, |
e9aa730c | 1464 | zio_done_func_t *done, void *private) |
34dc7c2f | 1465 | { |
b128c09f | 1466 | zio_t *zio; |
34dc7c2f | 1467 | |
b128c09f | 1468 | ASSERT(vd->vdev_ops->vdev_op_leaf); |
34dc7c2f | 1469 | |
b128c09f | 1470 | zio = zio_create(NULL, vd->vdev_spa, 0, NULL, |
2aa34383 | 1471 | data, size, size, done, private, type, priority, |
e8b96c60 | 1472 | flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED, |
b128c09f | 1473 | vd, offset, NULL, |
428870ff | 1474 | ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE); |
34dc7c2f | 1475 | |
b128c09f | 1476 | return (zio); |
34dc7c2f BB |
1477 | } |
1478 | ||
1479 | void | |
b128c09f | 1480 | zio_flush(zio_t *zio, vdev_t *vd) |
34dc7c2f | 1481 | { |
b128c09f | 1482 | zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, |
e8b96c60 | 1483 | NULL, NULL, |
b128c09f | 1484 | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY)); |
34dc7c2f BB |
1485 | } |
1486 | ||
428870ff BB |
1487 | void |
1488 | zio_shrink(zio_t *zio, uint64_t size) | |
1489 | { | |
1ce23dca PS |
1490 | ASSERT3P(zio->io_executor, ==, NULL); |
1491 | ASSERT3U(zio->io_orig_size, ==, zio->io_size); | |
1492 | ASSERT3U(size, <=, zio->io_size); | |
428870ff BB |
1493 | |
1494 | /* | |
1495 | * We don't shrink for raidz because of problems with the | |
1496 | * reconstruction when reading back less than the block size. | |
1497 | * Note, BP_IS_RAIDZ() assumes no compression. | |
1498 | */ | |
1499 | ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); | |
2aa34383 DK |
1500 | if (!BP_IS_RAIDZ(zio->io_bp)) { |
1501 | /* we are not doing a raw write */ | |
1502 | ASSERT3U(zio->io_size, ==, zio->io_lsize); | |
1503 | zio->io_orig_size = zio->io_size = zio->io_lsize = size; | |
1504 | } | |
428870ff BB |
1505 | } |
1506 | ||
34dc7c2f BB |
1507 | /* |
1508 | * ========================================================================== | |
b128c09f | 1509 | * Prepare to read and write logical blocks |
34dc7c2f BB |
1510 | * ========================================================================== |
1511 | */ | |
b128c09f | 1512 | |
62840030 | 1513 | static zio_t * |
b128c09f | 1514 | zio_read_bp_init(zio_t *zio) |
34dc7c2f | 1515 | { |
b128c09f | 1516 | blkptr_t *bp = zio->io_bp; |
b5256303 TC |
1517 | uint64_t psize = |
1518 | BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp); | |
34dc7c2f | 1519 | |
a1d477c2 MA |
1520 | ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); |
1521 | ||
fb5f0bc8 | 1522 | if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF && |
9babb374 | 1523 | zio->io_child_type == ZIO_CHILD_LOGICAL && |
b5256303 | 1524 | !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) { |
a6255b7f DQ |
1525 | zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize), |
1526 | psize, psize, zio_decompress); | |
34dc7c2f | 1527 | } |
34dc7c2f | 1528 | |
b5256303 TC |
1529 | if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) || |
1530 | BP_HAS_INDIRECT_MAC_CKSUM(bp)) && | |
1531 | zio->io_child_type == ZIO_CHILD_LOGICAL) { | |
1532 | zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize), | |
1533 | psize, psize, zio_decrypt); | |
1534 | } | |
1535 | ||
9b67f605 | 1536 | if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) { |
a6255b7f DQ |
1537 | int psize = BPE_GET_PSIZE(bp); |
1538 | void *data = abd_borrow_buf(zio->io_abd, psize); | |
1539 | ||
9b67f605 | 1540 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; |
a6255b7f DQ |
1541 | decode_embedded_bp_compressed(bp, data); |
1542 | abd_return_buf_copy(zio->io_abd, data, psize); | |
9b67f605 MA |
1543 | } else { |
1544 | ASSERT(!BP_IS_EMBEDDED(bp)); | |
a1d477c2 | 1545 | ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); |
9b67f605 MA |
1546 | } |
1547 | ||
9ae529ec | 1548 | if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0) |
b128c09f BB |
1549 | zio->io_flags |= ZIO_FLAG_DONT_CACHE; |
1550 | ||
428870ff BB |
1551 | if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP) |
1552 | zio->io_flags |= ZIO_FLAG_DONT_CACHE; | |
1553 | ||
1554 | if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL) | |
1555 | zio->io_pipeline = ZIO_DDT_READ_PIPELINE; | |
1556 | ||
62840030 | 1557 | return (zio); |
34dc7c2f BB |
1558 | } |
1559 | ||
62840030 | 1560 | static zio_t * |
b128c09f | 1561 | zio_write_bp_init(zio_t *zio) |
34dc7c2f | 1562 | { |
b128c09f | 1563 | if (!IO_IS_ALLOCATING(zio)) |
62840030 | 1564 | return (zio); |
34dc7c2f | 1565 | |
428870ff BB |
1566 | ASSERT(zio->io_child_type != ZIO_CHILD_DDT); |
1567 | ||
1568 | if (zio->io_bp_override) { | |
3dfb57a3 DB |
1569 | blkptr_t *bp = zio->io_bp; |
1570 | zio_prop_t *zp = &zio->io_prop; | |
1571 | ||
428870ff BB |
1572 | ASSERT(bp->blk_birth != zio->io_txg); |
1573 | ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0); | |
1574 | ||
1575 | *bp = *zio->io_bp_override; | |
1576 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; | |
1577 | ||
9b67f605 | 1578 | if (BP_IS_EMBEDDED(bp)) |
62840030 | 1579 | return (zio); |
9b67f605 | 1580 | |
03c6040b GW |
1581 | /* |
1582 | * If we've been overridden and nopwrite is set then | |
1583 | * set the flag accordingly to indicate that a nopwrite | |
1584 | * has already occurred. | |
1585 | */ | |
1586 | if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) { | |
1587 | ASSERT(!zp->zp_dedup); | |
3dfb57a3 | 1588 | ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum); |
03c6040b | 1589 | zio->io_flags |= ZIO_FLAG_NOPWRITE; |
62840030 | 1590 | return (zio); |
03c6040b GW |
1591 | } |
1592 | ||
1593 | ASSERT(!zp->zp_nopwrite); | |
1594 | ||
428870ff | 1595 | if (BP_IS_HOLE(bp) || !zp->zp_dedup) |
62840030 | 1596 | return (zio); |
428870ff | 1597 | |
3c67d83a TH |
1598 | ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags & |
1599 | ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify); | |
428870ff | 1600 | |
b5256303 TC |
1601 | if (BP_GET_CHECKSUM(bp) == zp->zp_checksum && |
1602 | !zp->zp_encrypt) { | |
428870ff BB |
1603 | BP_SET_DEDUP(bp, 1); |
1604 | zio->io_pipeline |= ZIO_STAGE_DDT_WRITE; | |
62840030 | 1605 | return (zio); |
428870ff | 1606 | } |
3dfb57a3 DB |
1607 | |
1608 | /* | |
1609 | * We were unable to handle this as an override bp, treat | |
1610 | * it as a regular write I/O. | |
1611 | */ | |
5511754b | 1612 | zio->io_bp_override = NULL; |
3dfb57a3 DB |
1613 | *bp = zio->io_bp_orig; |
1614 | zio->io_pipeline = zio->io_orig_pipeline; | |
1615 | } | |
1616 | ||
62840030 | 1617 | return (zio); |
3dfb57a3 DB |
1618 | } |
1619 | ||
62840030 | 1620 | static zio_t * |
3dfb57a3 DB |
1621 | zio_write_compress(zio_t *zio) |
1622 | { | |
1623 | spa_t *spa = zio->io_spa; | |
1624 | zio_prop_t *zp = &zio->io_prop; | |
1625 | enum zio_compress compress = zp->zp_compress; | |
1626 | blkptr_t *bp = zio->io_bp; | |
1627 | uint64_t lsize = zio->io_lsize; | |
1628 | uint64_t psize = zio->io_size; | |
1629 | int pass = 1; | |
1630 | ||
3dfb57a3 DB |
1631 | /* |
1632 | * If our children haven't all reached the ready stage, | |
1633 | * wait for them and then repeat this pipeline stage. | |
1634 | */ | |
ddc751d5 GW |
1635 | if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT | |
1636 | ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) { | |
62840030 | 1637 | return (NULL); |
ddc751d5 | 1638 | } |
3dfb57a3 DB |
1639 | |
1640 | if (!IO_IS_ALLOCATING(zio)) | |
62840030 | 1641 | return (zio); |
3dfb57a3 DB |
1642 | |
1643 | if (zio->io_children_ready != NULL) { | |
1644 | /* | |
1645 | * Now that all our children are ready, run the callback | |
1646 | * associated with this zio in case it wants to modify the | |
1647 | * data to be written. | |
1648 | */ | |
1649 | ASSERT3U(zp->zp_level, >, 0); | |
1650 | zio->io_children_ready(zio); | |
428870ff | 1651 | } |
34dc7c2f | 1652 | |
3dfb57a3 DB |
1653 | ASSERT(zio->io_child_type != ZIO_CHILD_DDT); |
1654 | ASSERT(zio->io_bp_override == NULL); | |
1655 | ||
b0bc7a84 | 1656 | if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) { |
b128c09f BB |
1657 | /* |
1658 | * We're rewriting an existing block, which means we're | |
1659 | * working on behalf of spa_sync(). For spa_sync() to | |
1660 | * converge, it must eventually be the case that we don't | |
1661 | * have to allocate new blocks. But compression changes | |
1662 | * the blocksize, which forces a reallocate, and makes | |
1663 | * convergence take longer. Therefore, after the first | |
1664 | * few passes, stop compressing to ensure convergence. | |
1665 | */ | |
428870ff BB |
1666 | pass = spa_sync_pass(spa); |
1667 | ||
1668 | ASSERT(zio->io_txg == spa_syncing_txg(spa)); | |
1669 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
1670 | ASSERT(!BP_GET_DEDUP(bp)); | |
34dc7c2f | 1671 | |
55d85d5a | 1672 | if (pass >= zfs_sync_pass_dont_compress) |
b128c09f | 1673 | compress = ZIO_COMPRESS_OFF; |
34dc7c2f | 1674 | |
b128c09f | 1675 | /* Make sure someone doesn't change their mind on overwrites */ |
9b67f605 | 1676 | ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp), |
428870ff | 1677 | spa_max_replication(spa)) == BP_GET_NDVAS(bp)); |
b128c09f | 1678 | } |
34dc7c2f | 1679 | |
2aa34383 | 1680 | /* If it's a compressed write that is not raw, compress the buffer. */ |
b5256303 TC |
1681 | if (compress != ZIO_COMPRESS_OFF && |
1682 | !(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) { | |
428870ff | 1683 | void *cbuf = zio_buf_alloc(lsize); |
10b3c7f5 MN |
1684 | psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize, |
1685 | zp->zp_complevel); | |
1686 | if (psize == 0 || psize >= lsize) { | |
b128c09f | 1687 | compress = ZIO_COMPRESS_OFF; |
428870ff | 1688 | zio_buf_free(cbuf, lsize); |
b5256303 TC |
1689 | } else if (!zp->zp_dedup && !zp->zp_encrypt && |
1690 | psize <= BPE_PAYLOAD_SIZE && | |
9b67f605 MA |
1691 | zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) && |
1692 | spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) { | |
1693 | encode_embedded_bp_compressed(bp, | |
1694 | cbuf, compress, lsize, psize); | |
1695 | BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA); | |
1696 | BP_SET_TYPE(bp, zio->io_prop.zp_type); | |
1697 | BP_SET_LEVEL(bp, zio->io_prop.zp_level); | |
1698 | zio_buf_free(cbuf, lsize); | |
1699 | bp->blk_birth = zio->io_txg; | |
1700 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; | |
1701 | ASSERT(spa_feature_is_active(spa, | |
1702 | SPA_FEATURE_EMBEDDED_DATA)); | |
62840030 | 1703 | return (zio); |
428870ff | 1704 | } else { |
9b67f605 | 1705 | /* |
b2255edc BB |
1706 | * Round compressed size up to the minimum allocation |
1707 | * size of the smallest-ashift device, and zero the | |
1708 | * tail. This ensures that the compressed size of the | |
1709 | * BP (and thus compressratio property) are correct, | |
c3520e7f MA |
1710 | * in that we charge for the padding used to fill out |
1711 | * the last sector. | |
9b67f605 | 1712 | */ |
b2255edc BB |
1713 | ASSERT3U(spa->spa_min_alloc, >=, SPA_MINBLOCKSHIFT); |
1714 | size_t rounded = (size_t)roundup(psize, | |
1715 | spa->spa_min_alloc); | |
c3520e7f | 1716 | if (rounded >= lsize) { |
9b67f605 MA |
1717 | compress = ZIO_COMPRESS_OFF; |
1718 | zio_buf_free(cbuf, lsize); | |
c3520e7f | 1719 | psize = lsize; |
9b67f605 | 1720 | } else { |
a6255b7f DQ |
1721 | abd_t *cdata = abd_get_from_buf(cbuf, lsize); |
1722 | abd_take_ownership_of_buf(cdata, B_TRUE); | |
1723 | abd_zero_off(cdata, psize, rounded - psize); | |
c3520e7f | 1724 | psize = rounded; |
a6255b7f | 1725 | zio_push_transform(zio, cdata, |
9b67f605 MA |
1726 | psize, lsize, NULL); |
1727 | } | |
b128c09f | 1728 | } |
3dfb57a3 DB |
1729 | |
1730 | /* | |
1731 | * We did not store this block as an embedded bp, so reset the bp | |
1732 | * and pipeline and treat it as a regular write I/O. | |
1733 | */ | |
1734 | zio->io_bp_override = NULL; | |
1735 | *bp = zio->io_bp_orig; | |
1736 | zio->io_pipeline = zio->io_orig_pipeline; | |
1737 | ||
b1d21733 TC |
1738 | } else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 && |
1739 | zp->zp_type == DMU_OT_DNODE) { | |
1740 | /* | |
1741 | * The DMU actually relies on the zio layer's compression | |
1742 | * to free metadnode blocks that have had all contained | |
1743 | * dnodes freed. As a result, even when doing a raw | |
1744 | * receive, we must check whether the block can be compressed | |
1745 | * to a hole. | |
1746 | */ | |
1747 | psize = zio_compress_data(ZIO_COMPRESS_EMPTY, | |
10b3c7f5 MN |
1748 | zio->io_abd, NULL, lsize, zp->zp_complevel); |
1749 | if (psize == 0 || psize >= lsize) | |
b1d21733 | 1750 | compress = ZIO_COMPRESS_OFF; |
2aa34383 DK |
1751 | } else { |
1752 | ASSERT3U(psize, !=, 0); | |
b128c09f | 1753 | } |
34dc7c2f | 1754 | |
b128c09f BB |
1755 | /* |
1756 | * The final pass of spa_sync() must be all rewrites, but the first | |
1757 | * few passes offer a trade-off: allocating blocks defers convergence, | |
1758 | * but newly allocated blocks are sequential, so they can be written | |
1759 | * to disk faster. Therefore, we allow the first few passes of | |
1760 | * spa_sync() to allocate new blocks, but force rewrites after that. | |
1761 | * There should only be a handful of blocks after pass 1 in any case. | |
1762 | */ | |
b0bc7a84 MG |
1763 | if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg && |
1764 | BP_GET_PSIZE(bp) == psize && | |
55d85d5a | 1765 | pass >= zfs_sync_pass_rewrite) { |
cc99f275 | 1766 | VERIFY3U(psize, !=, 0); |
1c27024e | 1767 | enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; |
cc99f275 | 1768 | |
b128c09f BB |
1769 | zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; |
1770 | zio->io_flags |= ZIO_FLAG_IO_REWRITE; | |
1771 | } else { | |
1772 | BP_ZERO(bp); | |
1773 | zio->io_pipeline = ZIO_WRITE_PIPELINE; | |
1774 | } | |
34dc7c2f | 1775 | |
428870ff | 1776 | if (psize == 0) { |
b0bc7a84 MG |
1777 | if (zio->io_bp_orig.blk_birth != 0 && |
1778 | spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) { | |
1779 | BP_SET_LSIZE(bp, lsize); | |
1780 | BP_SET_TYPE(bp, zp->zp_type); | |
1781 | BP_SET_LEVEL(bp, zp->zp_level); | |
1782 | BP_SET_BIRTH(bp, zio->io_txg, 0); | |
1783 | } | |
b128c09f BB |
1784 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; |
1785 | } else { | |
1786 | ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER); | |
1787 | BP_SET_LSIZE(bp, lsize); | |
b0bc7a84 MG |
1788 | BP_SET_TYPE(bp, zp->zp_type); |
1789 | BP_SET_LEVEL(bp, zp->zp_level); | |
428870ff | 1790 | BP_SET_PSIZE(bp, psize); |
b128c09f BB |
1791 | BP_SET_COMPRESS(bp, compress); |
1792 | BP_SET_CHECKSUM(bp, zp->zp_checksum); | |
428870ff | 1793 | BP_SET_DEDUP(bp, zp->zp_dedup); |
b128c09f | 1794 | BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); |
428870ff BB |
1795 | if (zp->zp_dedup) { |
1796 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
1797 | ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); | |
b5256303 TC |
1798 | ASSERT(!zp->zp_encrypt || |
1799 | DMU_OT_IS_ENCRYPTED(zp->zp_type)); | |
428870ff BB |
1800 | zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE; |
1801 | } | |
03c6040b GW |
1802 | if (zp->zp_nopwrite) { |
1803 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
1804 | ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); | |
1805 | zio->io_pipeline |= ZIO_STAGE_NOP_WRITE; | |
1806 | } | |
428870ff | 1807 | } |
62840030 | 1808 | return (zio); |
428870ff BB |
1809 | } |
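/*
 * Illustrative sketch (not part of the original source): the branch above
 * that rounds the compressed size up to spa_min_alloc charges the BP for
 * whole minimum-allocation units and zero-fills the tail.  With a
 * hypothetical 8 KiB spa_min_alloc and a 128 KiB logical block:
 */
#include <assert.h>
#include <stddef.h>

static size_t
roundup_psize_example(void)
{
        const size_t min_alloc = 8192;  /* assumed smallest-ashift unit */
        const size_t lsize = 131072;    /* 128 KiB logical size */
        size_t psize = 60000;           /* compressed payload */

        /* Equivalent of roundup(psize, spa->spa_min_alloc) above. */
        size_t rounded = ((psize + min_alloc - 1) / min_alloc) * min_alloc;
        assert(rounded == 65536);       /* eight 8 KiB units */

        /* Keep the compressed copy only if it still saves space. */
        return (rounded >= lsize ? lsize : rounded);
}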
1810 | ||
62840030 | 1811 | static zio_t * |
428870ff BB |
1812 | zio_free_bp_init(zio_t *zio) |
1813 | { | |
1814 | blkptr_t *bp = zio->io_bp; | |
1815 | ||
1816 | if (zio->io_child_type == ZIO_CHILD_LOGICAL) { | |
1817 | if (BP_GET_DEDUP(bp)) | |
1818 | zio->io_pipeline = ZIO_DDT_FREE_PIPELINE; | |
b128c09f | 1819 | } |
34dc7c2f | 1820 | |
a1d477c2 MA |
1821 | ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy); |
1822 | ||
62840030 | 1823 | return (zio); |
34dc7c2f BB |
1824 | } |
1825 | ||
b128c09f BB |
1826 | /* |
1827 | * ========================================================================== | |
1828 | * Execute the I/O pipeline | |
1829 | * ========================================================================== | |
1830 | */ | |
1831 | ||
1832 | static void | |
7ef5e54e | 1833 | zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline) |
34dc7c2f | 1834 | { |
428870ff | 1835 | spa_t *spa = zio->io_spa; |
b128c09f | 1836 | zio_type_t t = zio->io_type; |
a38718a6 | 1837 | int flags = (cutinline ? TQ_FRONT : 0); |
34dc7c2f BB |
1838 | |
1839 | /* | |
9babb374 BB |
1840 | * If we're a config writer or a probe, the normal issue and |
1841 | * interrupt threads may all be blocked waiting for the config lock. | |
1842 | * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL. | |
34dc7c2f | 1843 | */ |
9babb374 | 1844 | if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE)) |
b128c09f | 1845 | t = ZIO_TYPE_NULL; |
34dc7c2f BB |
1846 | |
1847 | /* | |
b128c09f | 1848 | * A similar issue exists for the L2ARC write thread until L2ARC 2.0. |
34dc7c2f | 1849 | */ |
b128c09f BB |
1850 | if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux) |
1851 | t = ZIO_TYPE_NULL; | |
34dc7c2f | 1852 | |
428870ff | 1853 | /* |
7ef5e54e AL |
1854 | * If this is a high priority I/O, then use the high priority taskq if |
1855 | * available. | |
428870ff | 1856 | */ |
18b14b17 GW |
1857 | if ((zio->io_priority == ZIO_PRIORITY_NOW || |
1858 | zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) && | |
7ef5e54e | 1859 | spa->spa_zio_taskq[t][q + 1].stqs_count != 0) |
428870ff BB |
1860 | q++; |
1861 | ||
1862 | ASSERT3U(q, <, ZIO_TASKQ_TYPES); | |
5cc556b4 | 1863 | |
a38718a6 GA |
1864 | /* |
1865 | * NB: We are assuming that the zio can only be dispatched | |
1866 | * to a single taskq at a time. It would be a grievous error | |
1867 | * to dispatch the zio to another taskq at the same time. | |
1868 | */ | |
1869 | ASSERT(taskq_empty_ent(&zio->io_tqent)); | |
7ef5e54e AL |
1870 | spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio, |
1871 | flags, &zio->io_tqent); | |
b128c09f | 1872 | } |
34dc7c2f | 1873 | |
b128c09f | 1874 | static boolean_t |
7ef5e54e | 1875 | zio_taskq_member(zio_t *zio, zio_taskq_type_t q) |
b128c09f | 1876 | { |
b128c09f | 1877 | spa_t *spa = zio->io_spa; |
34dc7c2f | 1878 | |
b3212d2f MA |
1879 | taskq_t *tq = taskq_of_curthread(); |
1880 | ||
1c27024e | 1881 | for (zio_type_t t = 0; t < ZIO_TYPES; t++) { |
7ef5e54e AL |
1882 | spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; |
1883 | uint_t i; | |
1884 | for (i = 0; i < tqs->stqs_count; i++) { | |
b3212d2f | 1885 | if (tqs->stqs_taskq[i] == tq) |
7ef5e54e AL |
1886 | return (B_TRUE); |
1887 | } | |
1888 | } | |
34dc7c2f | 1889 | |
b128c09f BB |
1890 | return (B_FALSE); |
1891 | } | |
34dc7c2f | 1892 | |
62840030 | 1893 | static zio_t * |
b128c09f BB |
1894 | zio_issue_async(zio_t *zio) |
1895 | { | |
428870ff | 1896 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); |
b128c09f | 1897 | |
62840030 | 1898 | return (NULL); |
34dc7c2f BB |
1899 | } |
1900 | ||
b128c09f BB |
1901 | void |
1902 | zio_interrupt(zio_t *zio) | |
34dc7c2f | 1903 | { |
428870ff | 1904 | zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE); |
b128c09f | 1905 | } |
34dc7c2f | 1906 | |
d3c1e45b MM |
1907 | void |
1908 | zio_delay_interrupt(zio_t *zio) | |
1909 | { | |
1910 | /* | |
1911 | * The timeout_generic() function isn't defined in userspace, so | |
1912 | * rather than trying to implement the function, the zio delay | |
1913 | * functionality has been disabled for userspace builds. | |
1914 | */ | |
1915 | ||
1916 | #ifdef _KERNEL | |
1917 | /* | |
1918 | * If io_target_timestamp is zero, then no delay has been registered | |
1919 | * for this IO, so we skip the delay and issue it directly to the | |
1920 | * zio layer. | |
1921 | */ | |
1922 | if (zio->io_target_timestamp != 0) { | |
1923 | hrtime_t now = gethrtime(); | |
1924 | ||
1925 | if (now >= zio->io_target_timestamp) { | |
1926 | /* | |
1927 | * This IO has already taken longer than the target | |
1928 | * delay to complete, so we don't want to delay it | |
1929 | * any longer; we "miss" the delay and issue it | |
1930 | * directly to the zio layer. This is likely due to | |
1931 | * the target latency being set to a value less than | |
1932 | * the underlying hardware can satisfy (e.g. delay | |
1933 | * set to 1ms, but the disks take 10ms to complete an | |
1934 | * IO request). | |
1935 | */ | |
1936 | ||
1937 | DTRACE_PROBE2(zio__delay__miss, zio_t *, zio, | |
1938 | hrtime_t, now); | |
1939 | ||
1940 | zio_interrupt(zio); | |
1941 | } else { | |
1942 | taskqid_t tid; | |
1943 | hrtime_t diff = zio->io_target_timestamp - now; | |
1944 | clock_t expire_at_tick = ddi_get_lbolt() + | |
1945 | NSEC_TO_TICK(diff); | |
1946 | ||
1947 | DTRACE_PROBE3(zio__delay__hit, zio_t *, zio, | |
1948 | hrtime_t, now, hrtime_t, diff); | |
1949 | ||
1950 | if (NSEC_TO_TICK(diff) == 0) { | |
1951 | /* Our delay is less than a jiffy - just spin */ | |
1952 | zfs_sleep_until(zio->io_target_timestamp); | |
1953 | zio_interrupt(zio); | |
1954 | } else { | |
1955 | /* | |
1956 | * Use taskq_dispatch_delay() in the place of | |
1957 | * OpenZFS's timeout_generic(). | |
1958 | */ | |
1959 | tid = taskq_dispatch_delay(system_taskq, | |
1960 | (task_func_t *)zio_interrupt, | |
1961 | zio, TQ_NOSLEEP, expire_at_tick); | |
1962 | if (tid == TASKQID_INVALID) { | |
1963 | /* | |
1964 | * Couldn't allocate a task. Just | |
1965 | * finish the zio without a delay. | |
1966 | */ | |
1967 | zio_interrupt(zio); | |
1968 | } | |
1969 | } | |
1970 | } | |
1971 | return; | |
1972 | } | |
1973 | #endif | |
1974 | DTRACE_PROBE1(zio__delay__skip, zio_t *, zio); | |
1975 | zio_interrupt(zio); | |
1976 | } | |
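/*
 * Illustrative sketch (not part of the original source): the spin-versus-
 * dispatch decision in zio_delay_interrupt() above hinges on whether the
 * remaining delay is at least one clock tick.  Assuming a hypothetical
 * HZ of 100 (10 ms ticks):
 */
#include <assert.h>
#include <stdint.h>

static void
delay_tick_example(void)
{
        const uint64_t nsec_per_tick = 1000000000ULL / 100;    /* 10 ms */
        uint64_t short_delay = 2000000;  /* 2 ms -> 0 ticks, just spin */
        uint64_t long_delay = 25000000;  /* 25 ms -> 2 ticks, use taskq */

        assert(short_delay / nsec_per_tick == 0);
        assert(long_delay / nsec_per_tick == 2);
}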
1977 | ||
8fb1ede1 | 1978 | static void |
638dd5f4 | 1979 | zio_deadman_impl(zio_t *pio, int ziodepth) |
8fb1ede1 BB |
1980 | { |
1981 | zio_t *cio, *cio_next; | |
1982 | zio_link_t *zl = NULL; | |
1983 | vdev_t *vd = pio->io_vd; | |
1984 | ||
638dd5f4 TC |
1985 | if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) { |
1986 | vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL; | |
8fb1ede1 BB |
1987 | zbookmark_phys_t *zb = &pio->io_bookmark; |
1988 | uint64_t delta = gethrtime() - pio->io_timestamp; | |
1989 | uint64_t failmode = spa_get_deadman_failmode(pio->io_spa); | |
1990 | ||
a887d653 | 1991 | zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu " |
8fb1ede1 BB |
1992 | "delta=%llu queued=%llu io=%llu " |
1993 | "path=%s last=%llu " | |
1994 | "type=%d priority=%d flags=0x%x " | |
1995 | "stage=0x%x pipeline=0x%x pipeline-trace=0x%x " | |
1996 | "objset=%llu object=%llu level=%llu blkid=%llu " | |
1997 | "offset=%llu size=%llu error=%d", | |
638dd5f4 | 1998 | ziodepth, pio, pio->io_timestamp, |
8fb1ede1 | 1999 | delta, pio->io_delta, pio->io_delay, |
638dd5f4 | 2000 | vd ? vd->vdev_path : "NULL", vq ? vq->vq_io_complete_ts : 0, |
8fb1ede1 | 2001 | pio->io_type, pio->io_priority, pio->io_flags, |
638dd5f4 | 2002 | pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace, |
8fb1ede1 BB |
2003 | zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid, |
2004 | pio->io_offset, pio->io_size, pio->io_error); | |
1144586b | 2005 | (void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN, |
4f072827 | 2006 | pio->io_spa, vd, zb, pio, 0); |
8fb1ede1 BB |
2007 | |
2008 | if (failmode == ZIO_FAILURE_MODE_CONTINUE && | |
2009 | taskq_empty_ent(&pio->io_tqent)) { | |
2010 | zio_interrupt(pio); | |
2011 | } | |
2012 | } | |
2013 | ||
2014 | mutex_enter(&pio->io_lock); | |
2015 | for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { | |
2016 | cio_next = zio_walk_children(pio, &zl); | |
638dd5f4 | 2017 | zio_deadman_impl(cio, ziodepth + 1); |
8fb1ede1 BB |
2018 | } |
2019 | mutex_exit(&pio->io_lock); | |
2020 | } | |
2021 | ||
2022 | /* | |
2023 | * Log the critical information describing this zio and all of its children | |
2024 | * using the zfs_dbgmsg() interface, then post a deadman event for the ZED. | |
2025 | */ | |
2026 | void | |
2027 | zio_deadman(zio_t *pio, char *tag) | |
2028 | { | |
2029 | spa_t *spa = pio->io_spa; | |
2030 | char *name = spa_name(spa); | |
2031 | ||
2032 | if (!zfs_deadman_enabled || spa_suspended(spa)) | |
2033 | return; | |
2034 | ||
638dd5f4 | 2035 | zio_deadman_impl(pio, 0); |
8fb1ede1 BB |
2036 | |
2037 | switch (spa_get_deadman_failmode(spa)) { | |
2038 | case ZIO_FAILURE_MODE_WAIT: | |
2039 | zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name); | |
2040 | break; | |
2041 | ||
2042 | case ZIO_FAILURE_MODE_CONTINUE: | |
2043 | zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name); | |
2044 | break; | |
2045 | ||
2046 | case ZIO_FAILURE_MODE_PANIC: | |
2047 | fm_panic("%s determined I/O to pool '%s' is hung.", tag, name); | |
2048 | break; | |
2049 | } | |
2050 | } | |
2051 | ||
b128c09f BB |
2052 | /* |
2053 | * Execute the I/O pipeline until one of the following occurs: | |
2054 | * (1) the I/O completes; (2) the pipeline stalls waiting for | |
2055 | * dependent child I/Os; (3) the I/O issues, so we're waiting | |
2056 | * for an I/O completion interrupt; (4) the I/O is delegated by | |
2057 | * vdev-level caching or aggregation; (5) the I/O is deferred | |
2058 | * due to vdev-level queueing; (6) the I/O is handed off to | |
2059 | * another thread. In all cases, the pipeline stops whenever | |
8e07b99b | 2060 | * there's no CPU work; it never burns a thread in cv_wait_io(). |
b128c09f BB |
2061 | * |
2062 | * There's no locking on io_stage because there's no legitimate way | |
2063 | * for multiple threads to be attempting to process the same I/O. | |
2064 | */ | |
428870ff | 2065 | static zio_pipe_stage_t *zio_pipeline[]; |
34dc7c2f | 2066 | |
da6b4005 NB |
2067 | /* |
2068 | * zio_execute() is a wrapper around the static function | |
2069 | * __zio_execute() so that we can force __zio_execute() to be | |
2070 | * inlined. This reduces stack overhead which is important | |
2071 | * because __zio_execute() is called recursively in several zio | |
2072 | * code paths. zio_execute() itself cannot be inlined because | |
2073 | * it is externally visible. | |
2074 | */ | |
b128c09f BB |
2075 | void |
2076 | zio_execute(zio_t *zio) | |
da6b4005 | 2077 | { |
92119cc2 BB |
2078 | fstrans_cookie_t cookie; |
2079 | ||
2080 | cookie = spl_fstrans_mark(); | |
da6b4005 | 2081 | __zio_execute(zio); |
92119cc2 | 2082 | spl_fstrans_unmark(cookie); |
da6b4005 NB |
2083 | } |
2084 | ||
b58986ee BB |
2085 | /* |
2086 | * Used to determine if in the current context the stack is sized large | |
2087 | * enough to allow zio_execute() to be called recursively. A minimum | |
2088 | * stack size of 16K is required to avoid needing to re-dispatch the zio. | |
2089 | */ | |
65c7cc49 | 2090 | static boolean_t |
b58986ee BB |
2091 | zio_execute_stack_check(zio_t *zio) |
2092 | { | |
2093 | #if !defined(HAVE_LARGE_STACKS) | |
2094 | dsl_pool_t *dp = spa_get_dsl(zio->io_spa); | |
2095 | ||
2096 | /* Executing in txg_sync_thread() context. */ | |
2097 | if (dp && curthread == dp->dp_tx.tx_sync_thread) | |
2098 | return (B_TRUE); | |
2099 | ||
2100 | /* Pool initialization outside of zio_taskq context. */ | |
2101 | if (dp && spa_is_initializing(dp->dp_spa) && | |
2102 | !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) && | |
2103 | !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH)) | |
2104 | return (B_TRUE); | |
2105 | #endif /* HAVE_LARGE_STACKS */ | |
2106 | ||
2107 | return (B_FALSE); | |
2108 | } | |
2109 | ||
da6b4005 NB |
2110 | __attribute__((always_inline)) |
2111 | static inline void | |
2112 | __zio_execute(zio_t *zio) | |
b128c09f | 2113 | { |
3dfb57a3 DB |
2114 | ASSERT3U(zio->io_queued_timestamp, >, 0); |
2115 | ||
b128c09f | 2116 | while (zio->io_stage < ZIO_STAGE_DONE) { |
428870ff BB |
2117 | enum zio_stage pipeline = zio->io_pipeline; |
2118 | enum zio_stage stage = zio->io_stage; | |
62840030 MA |
2119 | |
2120 | zio->io_executor = curthread; | |
34dc7c2f | 2121 | |
b128c09f | 2122 | ASSERT(!MUTEX_HELD(&zio->io_lock)); |
428870ff BB |
2123 | ASSERT(ISP2(stage)); |
2124 | ASSERT(zio->io_stall == NULL); | |
34dc7c2f | 2125 | |
428870ff BB |
2126 | do { |
2127 | stage <<= 1; | |
2128 | } while ((stage & pipeline) == 0); | |
b128c09f BB |
2129 | |
2130 | ASSERT(stage <= ZIO_STAGE_DONE); | |
34dc7c2f BB |
2131 | |
2132 | /* | |
b128c09f BB |
2133 | * If we are in interrupt context and this pipeline stage |
2134 | * will grab a config lock that is held across I/O, | |
428870ff BB |
2135 | * or may wait for an I/O that needs an interrupt thread |
2136 | * to complete, issue async to avoid deadlock. | |
2137 | * | |
2138 | * For VDEV_IO_START, we cut in line so that the io will | |
2139 | * be sent to disk promptly. | |
34dc7c2f | 2140 | */ |
91579709 BB |
2141 | if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL && |
2142 | zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { | |
b58986ee BB |
2143 | boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? |
2144 | zio_requeue_io_start_cut_in_line : B_FALSE; | |
91579709 BB |
2145 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); |
2146 | return; | |
2147 | } | |
2148 | ||
2149 | /* | |
b58986ee BB |
2150 | * If the current context doesn't have large enough stacks |
2151 | * the zio must be issued asynchronously to prevent overflow. | |
91579709 | 2152 | */ |
b58986ee BB |
2153 | if (zio_execute_stack_check(zio)) { |
2154 | boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? | |
2155 | zio_requeue_io_start_cut_in_line : B_FALSE; | |
428870ff | 2156 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); |
b128c09f | 2157 | return; |
34dc7c2f BB |
2158 | } |
2159 | ||
b128c09f | 2160 | zio->io_stage = stage; |
3dfb57a3 | 2161 | zio->io_pipeline_trace |= zio->io_stage; |
34dc7c2f | 2162 | |
62840030 MA |
2163 | /* |
2164 | * The zio pipeline stage returns the next zio to execute | |
2165 | * (typically the same as this one), or NULL if we should | |
2166 | * stop. | |
2167 | */ | |
2168 | zio = zio_pipeline[highbit64(stage) - 1](zio); | |
34dc7c2f | 2169 | |
62840030 MA |
2170 | if (zio == NULL) |
2171 | return; | |
b128c09f | 2172 | } |
34dc7c2f BB |
2173 | } |
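/*
 * Illustrative sketch (not part of the original source): pipeline stages
 * are one-hot bits, so the "stage <<= 1" loop in __zio_execute() above
 * finds the next enabled stage by shifting the current stage bit left
 * until it hits a bit that is set in io_pipeline.  With hypothetical
 * bit values:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t
next_stage_example(uint32_t stage, uint32_t pipeline)
{
        do {
                stage <<= 1;
        } while ((stage & pipeline) == 0);
        return (stage);
}

static void
next_stage_check(void)
{
        /* Stages 0x1 and 0x8 enabled; starting from 0x1 the next is 0x8. */
        assert(next_stage_example(0x1, 0x1 | 0x8) == 0x8);
}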
2174 | ||
da6b4005 | 2175 | |
b128c09f BB |
2176 | /* |
2177 | * ========================================================================== | |
2178 | * Initiate I/O, either sync or async | |
2179 | * ========================================================================== | |
2180 | */ | |
2181 | int | |
2182 | zio_wait(zio_t *zio) | |
34dc7c2f | 2183 | { |
9cdf7b1f MA |
2184 | /* |
2185 | * Some routines, like zio_free_sync(), may return a NULL zio | |
2186 | * to avoid the performance overhead of creating and then destroying | |
2187 | * an unneeded zio. For the callers' simplicity, we accept a NULL | |
2188 | * zio and ignore it. | |
2189 | */ | |
2190 | if (zio == NULL) | |
2191 | return (0); | |
2192 | ||
8fb1ede1 | 2193 | long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms); |
b128c09f | 2194 | int error; |
34dc7c2f | 2195 | |
1ce23dca PS |
2196 | ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN); |
2197 | ASSERT3P(zio->io_executor, ==, NULL); | |
34dc7c2f | 2198 | |
b128c09f | 2199 | zio->io_waiter = curthread; |
3dfb57a3 DB |
2200 | ASSERT0(zio->io_queued_timestamp); |
2201 | zio->io_queued_timestamp = gethrtime(); | |
34dc7c2f | 2202 | |
da6b4005 | 2203 | __zio_execute(zio); |
34dc7c2f | 2204 | |
b128c09f | 2205 | mutex_enter(&zio->io_lock); |
8fb1ede1 BB |
2206 | while (zio->io_executor != NULL) { |
2207 | error = cv_timedwait_io(&zio->io_cv, &zio->io_lock, | |
2208 | ddi_get_lbolt() + timeout); | |
2209 | ||
2210 | if (zfs_deadman_enabled && error == -1 && | |
2211 | gethrtime() - zio->io_queued_timestamp > | |
2212 | spa_deadman_ziotime(zio->io_spa)) { | |
2213 | mutex_exit(&zio->io_lock); | |
2214 | timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms); | |
2215 | zio_deadman(zio, FTAG); | |
2216 | mutex_enter(&zio->io_lock); | |
2217 | } | |
2218 | } | |
b128c09f | 2219 | mutex_exit(&zio->io_lock); |
34dc7c2f | 2220 | |
b128c09f BB |
2221 | error = zio->io_error; |
2222 | zio_destroy(zio); | |
34dc7c2f | 2223 | |
b128c09f BB |
2224 | return (error); |
2225 | } | |
34dc7c2f | 2226 | |
b128c09f BB |
2227 | void |
2228 | zio_nowait(zio_t *zio) | |
2229 | { | |
9cdf7b1f MA |
2230 | /* |
2231 | * See comment in zio_wait(). | |
2232 | */ | |
2233 | if (zio == NULL) | |
2234 | return; | |
2235 | ||
1ce23dca | 2236 | ASSERT3P(zio->io_executor, ==, NULL); |
34dc7c2f | 2237 | |
d164b209 BB |
2238 | if (zio->io_child_type == ZIO_CHILD_LOGICAL && |
2239 | zio_unique_parent(zio) == NULL) { | |
8878261f BB |
2240 | zio_t *pio; |
2241 | ||
34dc7c2f | 2242 | /* |
b128c09f | 2243 | * This is a logical async I/O with no parent to wait for it. |
9babb374 BB |
2244 | * We add it to the spa_async_root_zio "Godfather" I/O which |
2245 | * will ensure it completes prior to unloading the pool. | |
34dc7c2f | 2246 | */ |
b128c09f | 2247 | spa_t *spa = zio->io_spa; |
09eb36ce | 2248 | pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE]; |
9babb374 | 2249 | |
8878261f | 2250 | zio_add_child(pio, zio); |
b128c09f | 2251 | } |
34dc7c2f | 2252 | |
3dfb57a3 DB |
2253 | ASSERT0(zio->io_queued_timestamp); |
2254 | zio->io_queued_timestamp = gethrtime(); | |
da6b4005 | 2255 | __zio_execute(zio); |
b128c09f | 2256 | } |
34dc7c2f | 2257 | |
b128c09f BB |
2258 | /* |
2259 | * ========================================================================== | |
1ce23dca | 2260 | * Reexecute, cancel, or suspend/resume failed I/O |
b128c09f BB |
2261 | * ========================================================================== |
2262 | */ | |
34dc7c2f | 2263 | |
b128c09f BB |
2264 | static void |
2265 | zio_reexecute(zio_t *pio) | |
2266 | { | |
d164b209 BB |
2267 | zio_t *cio, *cio_next; |
2268 | ||
2269 | ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); | |
2270 | ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); | |
9babb374 BB |
2271 | ASSERT(pio->io_gang_leader == NULL); |
2272 | ASSERT(pio->io_gang_tree == NULL); | |
34dc7c2f | 2273 | |
b128c09f BB |
2274 | pio->io_flags = pio->io_orig_flags; |
2275 | pio->io_stage = pio->io_orig_stage; | |
2276 | pio->io_pipeline = pio->io_orig_pipeline; | |
2277 | pio->io_reexecute = 0; | |
03c6040b | 2278 | pio->io_flags |= ZIO_FLAG_REEXECUTED; |
3dfb57a3 | 2279 | pio->io_pipeline_trace = 0; |
b128c09f | 2280 | pio->io_error = 0; |
1c27024e | 2281 | for (int w = 0; w < ZIO_WAIT_TYPES; w++) |
d164b209 | 2282 | pio->io_state[w] = 0; |
1c27024e | 2283 | for (int c = 0; c < ZIO_CHILD_TYPES; c++) |
b128c09f | 2284 | pio->io_child_error[c] = 0; |
34dc7c2f | 2285 | |
428870ff BB |
2286 | if (IO_IS_ALLOCATING(pio)) |
2287 | BP_ZERO(pio->io_bp); | |
34dc7c2f | 2288 | |
b128c09f BB |
2289 | /* |
2290 | * As we reexecute pio's children, new children could be created. | |
d164b209 | 2291 | * New children go to the head of pio's io_child_list, however, |
b128c09f | 2292 | * so we will (correctly) not reexecute them. The key is that |
d164b209 BB |
2293 | * the remainder of pio's io_child_list, from 'cio_next' onward, |
2294 | * cannot be affected by any side effects of reexecuting 'cio'. | |
b128c09f | 2295 | */ |
1c27024e | 2296 | zio_link_t *zl = NULL; |
a8b2e306 | 2297 | mutex_enter(&pio->io_lock); |
3dfb57a3 DB |
2298 | for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { |
2299 | cio_next = zio_walk_children(pio, &zl); | |
1c27024e | 2300 | for (int w = 0; w < ZIO_WAIT_TYPES; w++) |
d164b209 | 2301 | pio->io_children[cio->io_child_type][w]++; |
b128c09f | 2302 | mutex_exit(&pio->io_lock); |
d164b209 | 2303 | zio_reexecute(cio); |
a8b2e306 | 2304 | mutex_enter(&pio->io_lock); |
34dc7c2f | 2305 | } |
a8b2e306 | 2306 | mutex_exit(&pio->io_lock); |
34dc7c2f | 2307 | |
b128c09f BB |
2308 | /* |
2309 | * Now that all children have been reexecuted, execute the parent. | |
9babb374 | 2310 | * We don't reexecute "The Godfather" I/O here as it's the |
9e2c3bb4 | 2311 | * responsibility of the caller to wait on it. |
b128c09f | 2312 | */ |
3dfb57a3 DB |
2313 | if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) { |
2314 | pio->io_queued_timestamp = gethrtime(); | |
da6b4005 | 2315 | __zio_execute(pio); |
3dfb57a3 | 2316 | } |
34dc7c2f BB |
2317 | } |
2318 | ||
b128c09f | 2319 | void |
cec3a0a1 | 2320 | zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason) |
34dc7c2f | 2321 | { |
b128c09f BB |
2322 | if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) |
2323 | fm_panic("Pool '%s' has encountered an uncorrectable I/O " | |
2324 | "failure and the failure mode property for this pool " | |
2325 | "is set to panic.", spa_name(spa)); | |
34dc7c2f | 2326 | |
bf89c199 BB |
2327 | cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O " |
2328 | "failure and has been suspended.\n", spa_name(spa)); | |
2329 | ||
1144586b | 2330 | (void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, |
4f072827 | 2331 | NULL, NULL, 0); |
34dc7c2f | 2332 | |
b128c09f | 2333 | mutex_enter(&spa->spa_suspend_lock); |
34dc7c2f | 2334 | |
b128c09f | 2335 | if (spa->spa_suspend_zio_root == NULL) |
9babb374 BB |
2336 | spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, |
2337 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | | |
2338 | ZIO_FLAG_GODFATHER); | |
34dc7c2f | 2339 | |
cec3a0a1 | 2340 | spa->spa_suspended = reason; |
34dc7c2f | 2341 | |
b128c09f | 2342 | if (zio != NULL) { |
9babb374 | 2343 | ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); |
b128c09f BB |
2344 | ASSERT(zio != spa->spa_suspend_zio_root); |
2345 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
d164b209 | 2346 | ASSERT(zio_unique_parent(zio) == NULL); |
b128c09f BB |
2347 | ASSERT(zio->io_stage == ZIO_STAGE_DONE); |
2348 | zio_add_child(spa->spa_suspend_zio_root, zio); | |
2349 | } | |
34dc7c2f | 2350 | |
b128c09f BB |
2351 | mutex_exit(&spa->spa_suspend_lock); |
2352 | } | |
34dc7c2f | 2353 | |
9babb374 | 2354 | int |
b128c09f BB |
2355 | zio_resume(spa_t *spa) |
2356 | { | |
9babb374 | 2357 | zio_t *pio; |
34dc7c2f BB |
2358 | |
2359 | /* | |
b128c09f | 2360 | * Reexecute all previously suspended i/o. |
34dc7c2f | 2361 | */ |
b128c09f | 2362 | mutex_enter(&spa->spa_suspend_lock); |
cec3a0a1 | 2363 | spa->spa_suspended = ZIO_SUSPEND_NONE; |
b128c09f BB |
2364 | cv_broadcast(&spa->spa_suspend_cv); |
2365 | pio = spa->spa_suspend_zio_root; | |
2366 | spa->spa_suspend_zio_root = NULL; | |
2367 | mutex_exit(&spa->spa_suspend_lock); | |
2368 | ||
2369 | if (pio == NULL) | |
9babb374 | 2370 | return (0); |
34dc7c2f | 2371 | |
9babb374 BB |
2372 | zio_reexecute(pio); |
2373 | return (zio_wait(pio)); | |
b128c09f BB |
2374 | } |
2375 | ||
2376 | void | |
2377 | zio_resume_wait(spa_t *spa) | |
2378 | { | |
2379 | mutex_enter(&spa->spa_suspend_lock); | |
2380 | while (spa_suspended(spa)) | |
2381 | cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); | |
2382 | mutex_exit(&spa->spa_suspend_lock); | |
34dc7c2f BB |
2383 | } |
2384 | ||
2385 | /* | |
2386 | * ========================================================================== | |
b128c09f BB |
2387 | * Gang blocks. |
2388 | * | |
2389 | * A gang block is a collection of small blocks that looks to the DMU | |
2390 | * like one large block. When zio_dva_allocate() cannot find a block | |
2391 | * of the requested size, due to either severe fragmentation or the pool | |
2392 | * being nearly full, it calls zio_write_gang_block() to construct the | |
2393 | * block from smaller fragments. | |
2394 | * | |
2395 | * A gang block consists of a gang header (zio_gbh_phys_t) and up to | |
2396 | * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like | |
2397 | * an indirect block: it's an array of block pointers. It consumes | |
2398 | * only one sector and hence is allocatable regardless of fragmentation. | |
2399 | * The gang header's bps point to its gang members, which hold the data. | |
2400 | * | |
2401 | * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> | |
2402 | * as the verifier to ensure uniqueness of the SHA256 checksum. | |
2403 | * Critically, the gang block bp's blk_cksum is the checksum of the data, | |
2404 | * not the gang header. This ensures that data block signatures (needed for | |
2405 | * deduplication) are independent of how the block is physically stored. | |
2406 | * | |
2407 | * Gang blocks can be nested: a gang member may itself be a gang block. | |
2408 | * Thus every gang block is a tree in which root and all interior nodes are | |
2409 | * gang headers, and the leaves are normal blocks that contain user data. | |
2410 | * The root of the gang tree is called the gang leader. | |
2411 | * | |
2412 | * To perform any operation (read, rewrite, free, claim) on a gang block, | |
2413 | * zio_gang_assemble() first assembles the gang tree (minus data leaves) | |
2414 | * in the io_gang_tree field of the original logical i/o by recursively | |
2415 | * reading the gang leader and all gang headers below it. This yields | |
2416 | * an in-core tree containing the contents of every gang header and the | |
2417 | * bps for every constituent of the gang block. | |
2418 | * | |
2419 | * With the gang tree now assembled, zio_gang_issue() just walks the gang tree | |
2420 | * and invokes a callback on each bp. To free a gang block, zio_gang_issue() | |
2421 | * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. | |
2422 | * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). | |
2423 | * zio_read_gang() is a wrapper around zio_read() that omits reading gang | |
2424 | * headers, since we already have those in io_gang_tree. zio_rewrite_gang() | |
2425 | * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() | |
2426 | * of the gang header plus zio_checksum_compute() of the data to update the | |
2427 | * gang header's blk_cksum as described above. | |
2428 | * | |
2429 | * The two-phase assemble/issue model solves the problem of partial failure -- | |
2430 | * what if you'd freed part of a gang block but then couldn't read the | |
2431 | * gang header for another part? Assembling the entire gang tree first | |
2432 | * ensures that all the necessary gang header I/O has succeeded before | |
2433 | * starting the actual work of free, claim, or write. Once the gang tree | |
2434 | * is assembled, free and claim are in-memory operations that cannot fail. | |
2435 | * | |
2436 | * In the event that a gang write fails, zio_dva_unallocate() walks the | |
2437 | * gang tree to immediately free (i.e. insert back into the space map) | |
2438 | * everything we've allocated. This ensures that we don't get ENOSPC | |
2439 | * errors during repeated suspend/resume cycles due to a flaky device. | |
2440 | * | |
2441 | * Gang rewrites only happen during sync-to-convergence. If we can't assemble | |
2442 | * the gang tree, we won't modify the block, so we can safely defer the free | |
2443 | * (knowing that the block is still intact). If we *can* assemble the gang | |
2444 | * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free | |
2445 | * each constituent bp and we can allocate a new block on the next sync pass. | |
2446 | * | |
2447 | * In all cases, the gang tree allows complete recovery from partial failure. | |
34dc7c2f BB |
2448 | * ========================================================================== |
2449 | */ | |
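/*
 * Illustrative sketch (not part of the original source, assumes this
 * file's headers): a gang header (zio_gbh_phys_t) is a fixed array of
 * SPA_GBH_NBLKPTRS block pointers plus a tail, so visiting a gang
 * block's constituents means walking each non-hole bp and recursing
 * wherever a bp is itself a gang block, exactly as zio_gang_tree_issue()
 * does below.  gang_visit_func_t is a hypothetical callback type.
 */
typedef void (*gang_visit_func_t)(blkptr_t *gbp, void *arg);

static void
gang_header_walk_example(zio_gbh_phys_t *gbh, gang_visit_func_t func,
    void *arg)
{
        for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
                blkptr_t *gbp = &gbh->zg_blkptr[g];

                if (BP_IS_HOLE(gbp))
                        continue;
                /* Leaves hold user data; interior bps are more headers. */
                func(gbp, arg);
        }
}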
b128c09f | 2450 | |
a6255b7f DQ |
2451 | static void |
2452 | zio_gang_issue_func_done(zio_t *zio) | |
2453 | { | |
e2af2acc | 2454 | abd_free(zio->io_abd); |
a6255b7f DQ |
2455 | } |
2456 | ||
b128c09f | 2457 | static zio_t * |
a6255b7f DQ |
2458 | zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, |
2459 | uint64_t offset) | |
34dc7c2f | 2460 | { |
b128c09f BB |
2461 | if (gn != NULL) |
2462 | return (pio); | |
34dc7c2f | 2463 | |
a6255b7f DQ |
2464 | return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset), |
2465 | BP_GET_PSIZE(bp), zio_gang_issue_func_done, | |
2466 | NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), | |
b128c09f BB |
2467 | &pio->io_bookmark)); |
2468 | } | |
2469 | ||
a6255b7f DQ |
2470 | static zio_t * |
2471 | zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, | |
2472 | uint64_t offset) | |
b128c09f BB |
2473 | { |
2474 | zio_t *zio; | |
2475 | ||
2476 | if (gn != NULL) { | |
a6255b7f DQ |
2477 | abd_t *gbh_abd = |
2478 | abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); | |
b128c09f | 2479 | zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, |
a6255b7f DQ |
2480 | gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL, |
2481 | pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), | |
2482 | &pio->io_bookmark); | |
34dc7c2f | 2483 | /* |
b128c09f BB |
2484 | * As we rewrite each gang header, the pipeline will compute |
2485 | * a new gang block header checksum for it; but no one will | |
2486 | * compute a new data checksum, so we do that here. The one | |
2487 | * exception is the gang leader: the pipeline already computed | |
2488 | * its data checksum because that stage precedes gang assembly. | |
2489 | * (Presently, nothing actually uses interior data checksums; | |
2490 | * this is just good hygiene.) | |
34dc7c2f | 2491 | */ |
9babb374 | 2492 | if (gn != pio->io_gang_leader->io_gang_tree) { |
a6255b7f DQ |
2493 | abd_t *buf = abd_get_offset(data, offset); |
2494 | ||
b128c09f | 2495 | zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), |
a6255b7f DQ |
2496 | buf, BP_GET_PSIZE(bp)); |
2497 | ||
e2af2acc | 2498 | abd_free(buf); |
b128c09f | 2499 | } |
428870ff BB |
2500 | /* |
2501 | * If we are here to damage data for testing purposes, | |
2502 | * leave the GBH alone so that we can detect the damage. | |
2503 | */ | |
2504 | if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) | |
2505 | zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; | |
34dc7c2f | 2506 | } else { |
b128c09f | 2507 | zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, |
a6255b7f DQ |
2508 | abd_get_offset(data, offset), BP_GET_PSIZE(bp), |
2509 | zio_gang_issue_func_done, NULL, pio->io_priority, | |
b128c09f | 2510 | ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); |
34dc7c2f BB |
2511 | } |
2512 | ||
b128c09f BB |
2513 | return (zio); |
2514 | } | |
34dc7c2f | 2515 | |
b128c09f | 2516 | /* ARGSUSED */ |
a6255b7f DQ |
2517 | static zio_t * |
2518 | zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, | |
2519 | uint64_t offset) | |
b128c09f | 2520 | { |
9cdf7b1f MA |
2521 | zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, |
2522 | ZIO_GANG_CHILD_FLAGS(pio)); | |
2523 | if (zio == NULL) { | |
2524 | zio = zio_null(pio, pio->io_spa, | |
2525 | NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)); | |
2526 | } | |
2527 | return (zio); | |
34dc7c2f BB |
2528 | } |
2529 | ||
b128c09f | 2530 | /* ARGSUSED */ |
a6255b7f DQ |
2531 | static zio_t * |
2532 | zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data, | |
2533 | uint64_t offset) | |
34dc7c2f | 2534 | { |
b128c09f BB |
2535 | return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, |
2536 | NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); | |
2537 | } | |
2538 | ||
2539 | static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { | |
2540 | NULL, | |
2541 | zio_read_gang, | |
2542 | zio_rewrite_gang, | |
2543 | zio_free_gang, | |
2544 | zio_claim_gang, | |
2545 | NULL | |
2546 | }; | |
34dc7c2f | 2547 | |
b128c09f | 2548 | static void zio_gang_tree_assemble_done(zio_t *zio); |
34dc7c2f | 2549 | |
b128c09f BB |
2550 | static zio_gang_node_t * |
2551 | zio_gang_node_alloc(zio_gang_node_t **gnpp) | |
2552 | { | |
2553 | zio_gang_node_t *gn; | |
34dc7c2f | 2554 | |
b128c09f | 2555 | ASSERT(*gnpp == NULL); |
34dc7c2f | 2556 | |
79c76d5b | 2557 | gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); |
b128c09f BB |
2558 | gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); |
2559 | *gnpp = gn; | |
34dc7c2f | 2560 | |
b128c09f | 2561 | return (gn); |
34dc7c2f BB |
2562 | } |
2563 | ||
34dc7c2f | 2564 | static void |
b128c09f | 2565 | zio_gang_node_free(zio_gang_node_t **gnpp) |
34dc7c2f | 2566 | { |
b128c09f | 2567 | zio_gang_node_t *gn = *gnpp; |
34dc7c2f | 2568 | |
1c27024e | 2569 | for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) |
b128c09f BB |
2570 | ASSERT(gn->gn_child[g] == NULL); |
2571 | ||
2572 | zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); | |
2573 | kmem_free(gn, sizeof (*gn)); | |
2574 | *gnpp = NULL; | |
34dc7c2f BB |
2575 | } |
2576 | ||
b128c09f BB |
2577 | static void |
2578 | zio_gang_tree_free(zio_gang_node_t **gnpp) | |
34dc7c2f | 2579 | { |
b128c09f | 2580 | zio_gang_node_t *gn = *gnpp; |
34dc7c2f | 2581 | |
b128c09f BB |
2582 | if (gn == NULL) |
2583 | return; | |
34dc7c2f | 2584 | |
1c27024e | 2585 | for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) |
b128c09f | 2586 | zio_gang_tree_free(&gn->gn_child[g]); |
34dc7c2f | 2587 | |
b128c09f | 2588 | zio_gang_node_free(gnpp); |
34dc7c2f BB |
2589 | } |
2590 | ||
b128c09f | 2591 | static void |
9babb374 | 2592 | zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) |
34dc7c2f | 2593 | { |
b128c09f | 2594 | zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); |
a6255b7f | 2595 | abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE); |
b128c09f | 2596 | |
9babb374 | 2597 | ASSERT(gio->io_gang_leader == gio); |
b128c09f | 2598 | ASSERT(BP_IS_GANG(bp)); |
34dc7c2f | 2599 | |
a6255b7f DQ |
2600 | zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE, |
2601 | zio_gang_tree_assemble_done, gn, gio->io_priority, | |
2602 | ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); | |
b128c09f | 2603 | } |
34dc7c2f | 2604 | |
b128c09f BB |
2605 | static void |
2606 | zio_gang_tree_assemble_done(zio_t *zio) | |
2607 | { | |
9babb374 | 2608 | zio_t *gio = zio->io_gang_leader; |
b128c09f BB |
2609 | zio_gang_node_t *gn = zio->io_private; |
2610 | blkptr_t *bp = zio->io_bp; | |
34dc7c2f | 2611 | |
9babb374 | 2612 | ASSERT(gio == zio_unique_parent(zio)); |
428870ff | 2613 | ASSERT(zio->io_child_count == 0); |
34dc7c2f | 2614 | |
b128c09f BB |
2615 | if (zio->io_error) |
2616 | return; | |
34dc7c2f | 2617 | |
a6255b7f | 2618 | /* this ABD was created from a linear buf in zio_gang_tree_assemble */ |
b128c09f | 2619 | if (BP_SHOULD_BYTESWAP(bp)) |
a6255b7f | 2620 | byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size); |
34dc7c2f | 2621 | |
a6255b7f | 2622 | ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh); |
b128c09f | 2623 | ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); |
428870ff | 2624 | ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); |
34dc7c2f | 2625 | |
e2af2acc | 2626 | abd_free(zio->io_abd); |
a6255b7f | 2627 | |
1c27024e | 2628 | for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { |
b128c09f BB |
2629 | blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; |
2630 | if (!BP_IS_GANG(gbp)) | |
2631 | continue; | |
9babb374 | 2632 | zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); |
b128c09f | 2633 | } |
34dc7c2f BB |
2634 | } |
2635 | ||
b128c09f | 2636 | static void |
a6255b7f DQ |
2637 | zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data, |
2638 | uint64_t offset) | |
34dc7c2f | 2639 | { |
9babb374 | 2640 | zio_t *gio = pio->io_gang_leader; |
b128c09f | 2641 | zio_t *zio; |
34dc7c2f | 2642 | |
b128c09f | 2643 | ASSERT(BP_IS_GANG(bp) == !!gn); |
9babb374 BB |
2644 | ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); |
2645 | ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); | |
34dc7c2f | 2646 | |
b128c09f BB |
2647 | /* |
2648 | * If you're a gang header, your data is in gn->gn_gbh. | |
2649 | * If you're a gang member, your data is in 'data' and gn == NULL. | |
2650 | */ | |
a6255b7f | 2651 | zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset); |
34dc7c2f | 2652 | |
b128c09f | 2653 | if (gn != NULL) { |
428870ff | 2654 | ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); |
34dc7c2f | 2655 | |
1c27024e | 2656 | for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { |
b128c09f BB |
2657 | blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; |
2658 | if (BP_IS_HOLE(gbp)) | |
2659 | continue; | |
a6255b7f DQ |
2660 | zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data, |
2661 | offset); | |
2662 | offset += BP_GET_PSIZE(gbp); | |
b128c09f | 2663 | } |
34dc7c2f BB |
2664 | } |
2665 | ||
9babb374 | 2666 | if (gn == gio->io_gang_tree) |
a6255b7f | 2667 | ASSERT3U(gio->io_size, ==, offset); |
34dc7c2f | 2668 | |
b128c09f BB |
2669 | if (zio != pio) |
2670 | zio_nowait(zio); | |
34dc7c2f BB |
2671 | } |
2672 | ||
62840030 | 2673 | static zio_t * |
b128c09f | 2674 | zio_gang_assemble(zio_t *zio) |
34dc7c2f | 2675 | { |
b128c09f | 2676 | blkptr_t *bp = zio->io_bp; |
34dc7c2f | 2677 | |
9babb374 BB |
2678 | ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); |
2679 | ASSERT(zio->io_child_type > ZIO_CHILD_GANG); | |
2680 | ||
2681 | zio->io_gang_leader = zio; | |
34dc7c2f | 2682 | |
b128c09f | 2683 | zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); |
34dc7c2f | 2684 | |
62840030 | 2685 | return (zio); |
34dc7c2f BB |
2686 | } |
2687 | ||
62840030 | 2688 | static zio_t * |
b128c09f | 2689 | zio_gang_issue(zio_t *zio) |
34dc7c2f | 2690 | { |
b128c09f | 2691 | blkptr_t *bp = zio->io_bp; |
34dc7c2f | 2692 | |
ddc751d5 | 2693 | if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) { |
62840030 | 2694 | return (NULL); |
ddc751d5 | 2695 | } |
34dc7c2f | 2696 | |
9babb374 BB |
2697 | ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); |
2698 | ASSERT(zio->io_child_type > ZIO_CHILD_GANG); | |
34dc7c2f | 2699 | |
b128c09f | 2700 | if (zio->io_child_error[ZIO_CHILD_GANG] == 0) |
a6255b7f DQ |
2701 | zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd, |
2702 | 0); | |
b128c09f | 2703 | else |
9babb374 | 2704 | zio_gang_tree_free(&zio->io_gang_tree); |
34dc7c2f | 2705 | |
b128c09f | 2706 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; |
34dc7c2f | 2707 | |
62840030 | 2708 | return (zio); |
34dc7c2f BB |
2709 | } |
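/*
 * Gang I/O proceeds in two phases.  zio_gang_assemble() recursively reads
 * every gang header into gn_gbh via zio_gang_tree_assemble(); once those
 * child reads complete, zio_gang_issue() walks the assembled tree with
 * zio_gang_tree_issue(), dispatching the per-type zio_gang_issue_func[]
 * callback for each constituent block pointer and advancing 'offset' past
 * each one so the pieces land contiguously in the caller's ABD.
 */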
2710 | ||
2711 | static void | |
b128c09f | 2712 | zio_write_gang_member_ready(zio_t *zio) |
34dc7c2f | 2713 | { |
d164b209 | 2714 | zio_t *pio = zio_unique_parent(zio); |
34dc7c2f BB |
2715 | dva_t *cdva = zio->io_bp->blk_dva; |
2716 | dva_t *pdva = pio->io_bp->blk_dva; | |
2717 | uint64_t asize; | |
2a8ba608 | 2718 | zio_t *gio __maybe_unused = zio->io_gang_leader; |
34dc7c2f | 2719 | |
b128c09f BB |
2720 | if (BP_IS_HOLE(zio->io_bp)) |
2721 | return; | |
2722 | ||
2723 | ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); | |
2724 | ||
2725 | ASSERT(zio->io_child_type == ZIO_CHILD_GANG); | |
428870ff BB |
2726 | ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); |
2727 | ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); | |
2728 | ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); | |
34dc7c2f | 2729 | ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); |
34dc7c2f BB |
2730 | |
2731 | mutex_enter(&pio->io_lock); | |
1c27024e | 2732 | for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { |
34dc7c2f BB |
2733 | ASSERT(DVA_GET_GANG(&pdva[d])); |
2734 | asize = DVA_GET_ASIZE(&pdva[d]); | |
2735 | asize += DVA_GET_ASIZE(&cdva[d]); | |
2736 | DVA_SET_ASIZE(&pdva[d], asize); | |
2737 | } | |
2738 | mutex_exit(&pio->io_lock); | |
2739 | } | |
2740 | ||
a6255b7f DQ |
2741 | static void |
2742 | zio_write_gang_done(zio_t *zio) | |
2743 | { | |
c955398b BL |
2744 | /* |
2745 | * The io_abd field will be NULL for a zio with no data. The io_flags | |
2746 | * will initially have the ZIO_FLAG_NODATA bit flag set, but we can't | |
2747 | * check for it here as it is cleared in zio_ready. | |
2748 | */ | |
2749 | if (zio->io_abd != NULL) | |
e2af2acc | 2750 | abd_free(zio->io_abd); |
a6255b7f DQ |
2751 | } |
2752 | ||
62840030 | 2753 | static zio_t * |
aa755b35 | 2754 | zio_write_gang_block(zio_t *pio, metaslab_class_t *mc) |
34dc7c2f | 2755 | { |
b128c09f BB |
2756 | spa_t *spa = pio->io_spa; |
2757 | blkptr_t *bp = pio->io_bp; | |
9babb374 | 2758 | zio_t *gio = pio->io_gang_leader; |
b128c09f BB |
2759 | zio_t *zio; |
2760 | zio_gang_node_t *gn, **gnpp; | |
34dc7c2f | 2761 | zio_gbh_phys_t *gbh; |
a6255b7f | 2762 | abd_t *gbh_abd; |
b128c09f BB |
2763 | uint64_t txg = pio->io_txg; |
2764 | uint64_t resid = pio->io_size; | |
2765 | uint64_t lsize; | |
428870ff | 2766 | int copies = gio->io_prop.zp_copies; |
b5256303 | 2767 | int gbh_copies; |
b128c09f | 2768 | zio_prop_t zp; |
1c27024e | 2769 | int error; |
c955398b | 2770 | boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA); |
b5256303 TC |
2771 | |
2772 | /* | |
2773 | * Encrypted blocks keep their salt and IV in DVA[2], so an encrypted |
2774 | * gang header can have at most two copies. |
2775 | */ | |
2776 | gbh_copies = MIN(copies + 1, spa_max_replication(spa)); | |
2777 | if (gio->io_prop.zp_encrypt && gbh_copies >= SPA_DVAS_PER_BP) | |
2778 | gbh_copies = SPA_DVAS_PER_BP - 1; | |
2779 | ||
1c27024e | 2780 | int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER; |
3dfb57a3 DB |
2781 | if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { |
2782 | ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); | |
c955398b | 2783 | ASSERT(has_data); |
3dfb57a3 DB |
2784 | |
2785 | flags |= METASLAB_ASYNC_ALLOC; | |
f8020c93 AM |
2786 | VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator]. |
2787 | mca_alloc_slots, pio)); | |
3dfb57a3 DB |
2788 | |
2789 | /* | |
2790 | * The logical zio has already placed a reservation for | |
2791 | * 'copies' allocation slots but gang blocks may require | |
2792 | * additional copies. These additional copies | |
2793 | * (i.e. gbh_copies - copies) are guaranteed to succeed | |
2794 | * since metaslab_class_throttle_reserve() always allows | |
2795 | * additional reservations for gang blocks. | |
2796 | */ | |
2797 | VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies, | |
492f64e9 | 2798 | pio->io_allocator, pio, flags)); |
3dfb57a3 DB |
2799 | } |
2800 | ||
2801 | error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, | |
4e21fd06 | 2802 | bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, |
492f64e9 | 2803 | &pio->io_alloc_list, pio, pio->io_allocator); |
34dc7c2f | 2804 | if (error) { |
3dfb57a3 DB |
2805 | if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { |
2806 | ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); | |
c955398b | 2807 | ASSERT(has_data); |
3dfb57a3 DB |
2808 | |
2809 | /* | |
2810 | * If we failed to allocate the gang block header then | |
2811 | * we remove any additional allocation reservations that | |
2812 | * we placed here. The original reservation will | |
2813 | * be removed when the logical I/O goes to the ready | |
2814 | * stage. | |
2815 | */ | |
2816 | metaslab_class_throttle_unreserve(mc, | |
492f64e9 | 2817 | gbh_copies - copies, pio->io_allocator, pio); |
3dfb57a3 DB |
2818 | } |
2819 | ||
b128c09f | 2820 | pio->io_error = error; |
62840030 | 2821 | return (pio); |
34dc7c2f BB |
2822 | } |
2823 | ||
9babb374 BB |
2824 | if (pio == gio) { |
2825 | gnpp = &gio->io_gang_tree; | |
b128c09f BB |
2826 | } else { |
2827 | gnpp = pio->io_private; | |
2828 | ASSERT(pio->io_ready == zio_write_gang_member_ready); | |
34dc7c2f BB |
2829 | } |
2830 | ||
b128c09f BB |
2831 | gn = zio_gang_node_alloc(gnpp); |
2832 | gbh = gn->gn_gbh; | |
2833 | bzero(gbh, SPA_GANGBLOCKSIZE); | |
a6255b7f | 2834 | gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE); |
34dc7c2f | 2835 | |
b128c09f BB |
2836 | /* |
2837 | * Create the gang header. | |
2838 | */ | |
a6255b7f DQ |
2839 | zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE, |
2840 | zio_write_gang_done, NULL, pio->io_priority, | |
2841 | ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); | |
34dc7c2f | 2842 | |
b128c09f BB |
2843 | /* |
2844 | * Create and nowait the gang children. | |
2845 | */ | |
1c27024e | 2846 | for (int g = 0; resid != 0; resid -= lsize, g++) { |
b128c09f BB |
2847 | lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), |
2848 | SPA_MINBLOCKSIZE); | |
2849 | ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); | |
2850 | ||
9babb374 | 2851 | zp.zp_checksum = gio->io_prop.zp_checksum; |
b128c09f | 2852 | zp.zp_compress = ZIO_COMPRESS_OFF; |
10b3c7f5 | 2853 | zp.zp_complevel = gio->io_prop.zp_complevel; |
b128c09f BB |
2854 | zp.zp_type = DMU_OT_NONE; |
2855 | zp.zp_level = 0; | |
428870ff | 2856 | zp.zp_copies = gio->io_prop.zp_copies; |
03c6040b GW |
2857 | zp.zp_dedup = B_FALSE; |
2858 | zp.zp_dedup_verify = B_FALSE; | |
2859 | zp.zp_nopwrite = B_FALSE; | |
4807c0ba TC |
2860 | zp.zp_encrypt = gio->io_prop.zp_encrypt; |
2861 | zp.zp_byteorder = gio->io_prop.zp_byteorder; | |
b5256303 TC |
2862 | bzero(zp.zp_salt, ZIO_DATA_SALT_LEN); |
2863 | bzero(zp.zp_iv, ZIO_DATA_IV_LEN); | |
2864 | bzero(zp.zp_mac, ZIO_DATA_MAC_LEN); | |
b128c09f | 2865 | |
1c27024e | 2866 | zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g], |
c955398b BL |
2867 | has_data ? abd_get_offset(pio->io_abd, pio->io_size - |
2868 | resid) : NULL, lsize, lsize, &zp, | |
2869 | zio_write_gang_member_ready, NULL, NULL, | |
a6255b7f | 2870 | zio_write_gang_done, &gn->gn_child[g], pio->io_priority, |
3dfb57a3 DB |
2871 | ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); |
2872 | ||
2873 | if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { | |
2874 | ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); | |
c955398b | 2875 | ASSERT(has_data); |
3dfb57a3 DB |
2876 | |
2877 | /* | |
2878 | * Gang children won't throttle but we should | |
2879 | * account for their work, so reserve an allocation | |
2880 | * slot for them here. | |
2881 | */ | |
2882 | VERIFY(metaslab_class_throttle_reserve(mc, | |
492f64e9 | 2883 | zp.zp_copies, cio->io_allocator, cio, flags)); |
3dfb57a3 DB |
2884 | } |
2885 | zio_nowait(cio); | |
b128c09f | 2886 | } |
34dc7c2f BB |
2887 | |
2888 | /* | |
b128c09f | 2889 | * Set pio's pipeline to just wait for zio to finish. |
34dc7c2f | 2890 | */ |
b128c09f BB |
2891 | pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; |
2892 | ||
920dd524 ED |
2893 | /* |
2894 | * We didn't allocate this bp, so make sure it doesn't get unmarked. | |
2895 | */ | |
2896 | pio->io_flags &= ~ZIO_FLAG_FASTWRITE; | |
2897 | ||
b128c09f BB |
2898 | zio_nowait(zio); |
2899 | ||
62840030 | 2900 | return (pio); |
34dc7c2f BB |
2901 | } |
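/*
 * Illustrative example (assuming SPA_GBH_NBLKPTRS == 3, the value implied
 * by a 512-byte gang header, and a 128K write): the split loop above
 * carves up resid == 131072 as follows:
 *
 *	g = 0: lsize = P2ROUNDUP(131072 / 3, 512) = 44032, resid -> 87040
 *	g = 1: lsize = P2ROUNDUP(87040 / 2, 512)  = 43520, resid -> 43520
 *	g = 2: lsize = P2ROUNDUP(43520 / 1, 512)  = 43520, resid -> 0
 *
 * The pieces sum back to 131072, each is a multiple of SPA_MINBLOCKSIZE,
 * and each gang member is written with compression off but the gang
 * leader's checksum and copy count, as set up in the zp initialization.
 */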
2902 | ||
03c6040b | 2903 | /* |
3c67d83a TH |
2904 | * The zio_nop_write stage in the pipeline determines if allocating a |
2905 | * new bp is necessary. The nopwrite feature can handle writes in | |
2906 | * either syncing or open context (i.e. zil writes) and as a result is | |
2907 | * mutually exclusive with dedup. | |
2908 | * | |
2909 | * By leveraging a cryptographically secure checksum, such as SHA256, we | |
2910 | * can compare the checksums of the new data and the old to determine if | |
2911 | * allocating a new block is required. Note that our requirements for | |
2912 | * cryptographic strength are fairly weak: there can't be any accidental | |
2913 | * hash collisions, but we don't need to be secure against intentional | |
2914 | * (malicious) collisions. To trigger a nopwrite, you have to be able | |
2915 | * to write the file to begin with, and triggering an incorrect (hash | |
2916 | * collision) nopwrite is no worse than simply writing to the file. | |
2917 | * That said, there are no known attacks against the checksum algorithms | |
2918 | * used for nopwrite, assuming that the salt and the checksums | |
2919 | * themselves remain secret. | |
03c6040b | 2920 | */ |
62840030 | 2921 | static zio_t * |
03c6040b GW |
2922 | zio_nop_write(zio_t *zio) |
2923 | { | |
2924 | blkptr_t *bp = zio->io_bp; | |
2925 | blkptr_t *bp_orig = &zio->io_bp_orig; | |
2926 | zio_prop_t *zp = &zio->io_prop; | |
2927 | ||
2928 | ASSERT(BP_GET_LEVEL(bp) == 0); | |
2929 | ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); | |
2930 | ASSERT(zp->zp_nopwrite); | |
2931 | ASSERT(!zp->zp_dedup); | |
2932 | ASSERT(zio->io_bp_override == NULL); | |
2933 | ASSERT(IO_IS_ALLOCATING(zio)); | |
2934 | ||
2935 | /* | |
2936 | * Check to see if the original bp and the new bp have matching | |
2937 | * characteristics (i.e. same checksum, compression algorithms, etc). | |
2938 | * If they don't then just continue with the pipeline which will | |
2939 | * allocate a new bp. | |
2940 | */ | |
2941 | if (BP_IS_HOLE(bp_orig) || | |
3c67d83a TH |
2942 | !(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags & |
2943 | ZCHECKSUM_FLAG_NOPWRITE) || | |
b5256303 | 2944 | BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) || |
03c6040b GW |
2945 | BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || |
2946 | BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || | |
2947 | BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || | |
2948 | zp->zp_copies != BP_GET_NDVAS(bp_orig)) | |
62840030 | 2949 | return (zio); |
03c6040b GW |
2950 | |
2951 | /* | |
2952 | * If the checksums match then reset the pipeline so that we | |
2953 | * avoid allocating a new bp and issuing any I/O. | |
2954 | */ | |
2955 | if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { | |
3c67d83a TH |
2956 | ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags & |
2957 | ZCHECKSUM_FLAG_NOPWRITE); | |
03c6040b GW |
2958 | ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); |
2959 | ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); | |
2960 | ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); | |
2961 | ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, | |
2962 | sizeof (uint64_t)) == 0); | |
2963 | ||
681a85cb GW |
2964 | /* |
2965 | * If we're overwriting a block that is currently on an | |
2966 | * indirect vdev, then ignore the nopwrite request and | |
2967 | * allow a new block to be allocated on a concrete vdev. | |
2968 | */ | |
2969 | spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER); | |
2970 | vdev_t *tvd = vdev_lookup_top(zio->io_spa, | |
2971 | DVA_GET_VDEV(&bp->blk_dva[0])); | |
2972 | if (tvd->vdev_ops == &vdev_indirect_ops) { | |
2973 | spa_config_exit(zio->io_spa, SCL_VDEV, FTAG); | |
2974 | return (zio); | |
2975 | } | |
2976 | spa_config_exit(zio->io_spa, SCL_VDEV, FTAG); | |
2977 | ||
03c6040b GW |
2978 | *bp = *bp_orig; |
2979 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; | |
2980 | zio->io_flags |= ZIO_FLAG_NOPWRITE; | |
2981 | } | |
2982 | ||
62840030 | 2983 | return (zio); |
03c6040b GW |
2984 | } |
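/*
 * Minimal sketch (hypothetical helper, not called from the pipeline):
 * the write properties that must already hold before the checksum
 * comparison in zio_nop_write() is even reachable.  It only restates
 * the requirements expressed by the comment block and ASSERTs above.
 */
static boolean_t
zio_prop_may_nopwrite(const zio_prop_t *zp)
{
	/* nopwrite is mutually exclusive with dedup */
	if (zp->zp_dedup)
		return (B_FALSE);

	/* the checksum must be strong enough to stand in for the data */
	if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
	    ZCHECKSUM_FLAG_NOPWRITE))
		return (B_FALSE);

	return (zp->zp_nopwrite);
}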
2985 | ||
34dc7c2f BB |
2986 | /* |
2987 | * ========================================================================== | |
428870ff | 2988 | * Dedup |
34dc7c2f BB |
2989 | * ========================================================================== |
2990 | */ | |
428870ff BB |
2991 | static void |
2992 | zio_ddt_child_read_done(zio_t *zio) | |
2993 | { | |
2994 | blkptr_t *bp = zio->io_bp; | |
2995 | ddt_entry_t *dde = zio->io_private; | |
2996 | ddt_phys_t *ddp; | |
2997 | zio_t *pio = zio_unique_parent(zio); | |
2998 | ||
2999 | mutex_enter(&pio->io_lock); | |
3000 | ddp = ddt_phys_select(dde, bp); | |
3001 | if (zio->io_error == 0) | |
3002 | ddt_phys_clear(ddp); /* this ddp doesn't need repair */ | |
a6255b7f DQ |
3003 | |
3004 | if (zio->io_error == 0 && dde->dde_repair_abd == NULL) | |
3005 | dde->dde_repair_abd = zio->io_abd; | |
428870ff | 3006 | else |
a6255b7f | 3007 | abd_free(zio->io_abd); |
428870ff BB |
3008 | mutex_exit(&pio->io_lock); |
3009 | } | |
3010 | ||
62840030 | 3011 | static zio_t * |
428870ff BB |
3012 | zio_ddt_read_start(zio_t *zio) |
3013 | { | |
3014 | blkptr_t *bp = zio->io_bp; | |
3015 | ||
3016 | ASSERT(BP_GET_DEDUP(bp)); | |
3017 | ASSERT(BP_GET_PSIZE(bp) == zio->io_size); | |
3018 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
3019 | ||
3020 | if (zio->io_child_error[ZIO_CHILD_DDT]) { | |
3021 | ddt_t *ddt = ddt_select(zio->io_spa, bp); | |
3022 | ddt_entry_t *dde = ddt_repair_start(ddt, bp); | |
3023 | ddt_phys_t *ddp = dde->dde_phys; | |
3024 | ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); | |
3025 | blkptr_t blk; | |
3026 | ||
3027 | ASSERT(zio->io_vsd == NULL); | |
3028 | zio->io_vsd = dde; | |
3029 | ||
3030 | if (ddp_self == NULL) | |
62840030 | 3031 | return (zio); |
428870ff | 3032 | |
1c27024e | 3033 | for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { |
428870ff BB |
3034 | if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) |
3035 | continue; | |
3036 | ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, | |
3037 | &blk); | |
3038 | zio_nowait(zio_read(zio, zio->io_spa, &blk, | |
a6255b7f DQ |
3039 | abd_alloc_for_io(zio->io_size, B_TRUE), |
3040 | zio->io_size, zio_ddt_child_read_done, dde, | |
3041 | zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) | | |
3042 | ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark)); | |
428870ff | 3043 | } |
62840030 | 3044 | return (zio); |
428870ff BB |
3045 | } |
3046 | ||
3047 | zio_nowait(zio_read(zio, zio->io_spa, bp, | |
a6255b7f | 3048 | zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority, |
428870ff BB |
3049 | ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); |
3050 | ||
62840030 | 3051 | return (zio); |
428870ff BB |
3052 | } |
3053 | ||
62840030 | 3054 | static zio_t * |
428870ff BB |
3055 | zio_ddt_read_done(zio_t *zio) |
3056 | { | |
3057 | blkptr_t *bp = zio->io_bp; | |
3058 | ||
ddc751d5 | 3059 | if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) { |
62840030 | 3060 | return (NULL); |
ddc751d5 | 3061 | } |
428870ff BB |
3062 | |
3063 | ASSERT(BP_GET_DEDUP(bp)); | |
3064 | ASSERT(BP_GET_PSIZE(bp) == zio->io_size); | |
3065 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
3066 | ||
3067 | if (zio->io_child_error[ZIO_CHILD_DDT]) { | |
3068 | ddt_t *ddt = ddt_select(zio->io_spa, bp); | |
3069 | ddt_entry_t *dde = zio->io_vsd; | |
3070 | if (ddt == NULL) { | |
3071 | ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); | |
62840030 | 3072 | return (zio); |
428870ff BB |
3073 | } |
3074 | if (dde == NULL) { | |
3075 | zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; | |
3076 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); | |
62840030 | 3077 | return (NULL); |
428870ff | 3078 | } |
a6255b7f DQ |
3079 | if (dde->dde_repair_abd != NULL) { |
3080 | abd_copy(zio->io_abd, dde->dde_repair_abd, | |
3081 | zio->io_size); | |
428870ff BB |
3082 | zio->io_child_error[ZIO_CHILD_DDT] = 0; |
3083 | } | |
3084 | ddt_repair_done(ddt, dde); | |
3085 | zio->io_vsd = NULL; | |
3086 | } | |
3087 | ||
3088 | ASSERT(zio->io_vsd == NULL); | |
3089 | ||
62840030 | 3090 | return (zio); |
428870ff BB |
3091 | } |
3092 | ||
3093 | static boolean_t | |
3094 | zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) | |
3095 | { | |
3096 | spa_t *spa = zio->io_spa; | |
c17bcf83 | 3097 | boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW); |
428870ff | 3098 | |
c17bcf83 | 3099 | ASSERT(!(zio->io_bp_override && do_raw)); |
2aa34383 | 3100 | |
428870ff BB |
3101 | /* |
3102 | * Note: we compare the original data, not the transformed data, | |
3103 | * because when zio->io_bp is an override bp, we will not have | |
3104 | * pushed the I/O transforms. That's an important optimization | |
3105 | * because otherwise we'd compress/encrypt all dmu_sync() data twice. | |
c17bcf83 | 3106 | * However, a zio should never be both raw and an override, so in the |
b5256303 | 3107 | * raw case we can compare the io_abd directly. This is useful because |
c17bcf83 TC |
3108 | * it allows us to do dedup verification even if we don't have access |
3109 | * to the original data (for instance, if the encryption keys aren't | |
3110 | * loaded). | |
428870ff | 3111 | */ |
c17bcf83 | 3112 | |
1c27024e | 3113 | for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { |
428870ff BB |
3114 | zio_t *lio = dde->dde_lead_zio[p]; |
3115 | ||
c17bcf83 TC |
3116 | if (lio != NULL && do_raw) { |
3117 | return (lio->io_size != zio->io_size || | |
a6255b7f | 3118 | abd_cmp(zio->io_abd, lio->io_abd) != 0); |
c17bcf83 | 3119 | } else if (lio != NULL) { |
428870ff | 3120 | return (lio->io_orig_size != zio->io_orig_size || |
a6255b7f | 3121 | abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0); |
428870ff BB |
3122 | } |
3123 | } | |
3124 | ||
1c27024e | 3125 | for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { |
428870ff BB |
3126 | ddt_phys_t *ddp = &dde->dde_phys[p]; |
3127 | ||
c17bcf83 TC |
3128 | if (ddp->ddp_phys_birth != 0 && do_raw) { |
3129 | blkptr_t blk = *zio->io_bp; | |
3130 | uint64_t psize; | |
a6255b7f | 3131 | abd_t *tmpabd; |
c17bcf83 TC |
3132 | int error; |
3133 | ||
3134 | ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); | |
3135 | psize = BP_GET_PSIZE(&blk); | |
3136 | ||
3137 | if (psize != zio->io_size) | |
3138 | return (B_TRUE); | |
3139 | ||
3140 | ddt_exit(ddt); | |
3141 | ||
a6255b7f | 3142 | tmpabd = abd_alloc_for_io(psize, B_TRUE); |
c17bcf83 | 3143 | |
a6255b7f | 3144 | error = zio_wait(zio_read(NULL, spa, &blk, tmpabd, |
c17bcf83 TC |
3145 | psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ, |
3146 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | | |
3147 | ZIO_FLAG_RAW, &zio->io_bookmark)); | |
3148 | ||
3149 | if (error == 0) { | |
a6255b7f | 3150 | if (abd_cmp(tmpabd, zio->io_abd) != 0) |
c17bcf83 TC |
3151 | error = SET_ERROR(ENOENT); |
3152 | } | |
3153 | ||
a6255b7f | 3154 | abd_free(tmpabd); |
c17bcf83 TC |
3155 | ddt_enter(ddt); |
3156 | return (error != 0); | |
3157 | } else if (ddp->ddp_phys_birth != 0) { | |
428870ff | 3158 | arc_buf_t *abuf = NULL; |
2a432414 | 3159 | arc_flags_t aflags = ARC_FLAG_WAIT; |
428870ff BB |
3160 | blkptr_t blk = *zio->io_bp; |
3161 | int error; | |
3162 | ||
3163 | ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); | |
3164 | ||
c17bcf83 TC |
3165 | if (BP_GET_LSIZE(&blk) != zio->io_orig_size) |
3166 | return (B_TRUE); | |
3167 | ||
428870ff BB |
3168 | ddt_exit(ddt); |
3169 | ||
294f6806 | 3170 | error = arc_read(NULL, spa, &blk, |
428870ff BB |
3171 | arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, |
3172 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, | |
3173 | &aflags, &zio->io_bookmark); | |
3174 | ||
3175 | if (error == 0) { | |
a6255b7f | 3176 | if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data, |
428870ff | 3177 | zio->io_orig_size) != 0) |
c17bcf83 | 3178 | error = SET_ERROR(ENOENT); |
d3c2ae1c | 3179 | arc_buf_destroy(abuf, &abuf); |
428870ff BB |
3180 | } |
3181 | ||
3182 | ddt_enter(ddt); | |
3183 | return (error != 0); | |
3184 | } | |
3185 | } | |
3186 | ||
3187 | return (B_FALSE); | |
3188 | } | |
3189 | ||
3190 | static void | |
3191 | zio_ddt_child_write_ready(zio_t *zio) | |
3192 | { | |
3193 | int p = zio->io_prop.zp_copies; | |
3194 | ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); | |
3195 | ddt_entry_t *dde = zio->io_private; | |
3196 | ddt_phys_t *ddp = &dde->dde_phys[p]; | |
3197 | zio_t *pio; | |
3198 | ||
3199 | if (zio->io_error) | |
3200 | return; | |
3201 | ||
3202 | ddt_enter(ddt); | |
3203 | ||
3204 | ASSERT(dde->dde_lead_zio[p] == zio); | |
3205 | ||
3206 | ddt_phys_fill(ddp, zio->io_bp); | |
3207 | ||
1c27024e | 3208 | zio_link_t *zl = NULL; |
3dfb57a3 | 3209 | while ((pio = zio_walk_parents(zio, &zl)) != NULL) |
428870ff BB |
3210 | ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); |
3211 | ||
3212 | ddt_exit(ddt); | |
3213 | } | |
3214 | ||
3215 | static void | |
3216 | zio_ddt_child_write_done(zio_t *zio) | |
3217 | { | |
3218 | int p = zio->io_prop.zp_copies; | |
3219 | ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); | |
3220 | ddt_entry_t *dde = zio->io_private; | |
3221 | ddt_phys_t *ddp = &dde->dde_phys[p]; | |
3222 | ||
3223 | ddt_enter(ddt); | |
3224 | ||
3225 | ASSERT(ddp->ddp_refcnt == 0); | |
3226 | ASSERT(dde->dde_lead_zio[p] == zio); | |
3227 | dde->dde_lead_zio[p] = NULL; | |
3228 | ||
3229 | if (zio->io_error == 0) { | |
3dfb57a3 DB |
3230 | zio_link_t *zl = NULL; |
3231 | while (zio_walk_parents(zio, &zl) != NULL) | |
428870ff BB |
3232 | ddt_phys_addref(ddp); |
3233 | } else { | |
3234 | ddt_phys_clear(ddp); | |
3235 | } | |
3236 | ||
3237 | ddt_exit(ddt); | |
3238 | } | |
3239 | ||
62840030 | 3240 | static zio_t * |
428870ff BB |
3241 | zio_ddt_write(zio_t *zio) |
3242 | { | |
3243 | spa_t *spa = zio->io_spa; | |
3244 | blkptr_t *bp = zio->io_bp; | |
3245 | uint64_t txg = zio->io_txg; | |
3246 | zio_prop_t *zp = &zio->io_prop; | |
3247 | int p = zp->zp_copies; | |
428870ff | 3248 | zio_t *cio = NULL; |
428870ff BB |
3249 | ddt_t *ddt = ddt_select(spa, bp); |
3250 | ddt_entry_t *dde; | |
3251 | ddt_phys_t *ddp; | |
3252 | ||
3253 | ASSERT(BP_GET_DEDUP(bp)); | |
3254 | ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); | |
3255 | ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); | |
c17bcf83 | 3256 | ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW))); |
428870ff BB |
3257 | |
3258 | ddt_enter(ddt); | |
3259 | dde = ddt_lookup(ddt, bp, B_TRUE); | |
3260 | ddp = &dde->dde_phys[p]; | |
3261 | ||
3262 | if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { | |
3263 | /* | |
3264 | * If we're using a weak checksum, upgrade to a strong checksum | |
3265 | * and try again. If we're already using a strong checksum, | |
3266 | * we can't resolve it, so just convert to an ordinary write. | |
3267 | * (And automatically e-mail a paper to Nature?) | |
3268 | */ | |
3c67d83a TH |
3269 | if (!(zio_checksum_table[zp->zp_checksum].ci_flags & |
3270 | ZCHECKSUM_FLAG_DEDUP)) { | |
428870ff BB |
3271 | zp->zp_checksum = spa_dedup_checksum(spa); |
3272 | zio_pop_transforms(zio); | |
3273 | zio->io_stage = ZIO_STAGE_OPEN; | |
3274 | BP_ZERO(bp); | |
3275 | } else { | |
03c6040b | 3276 | zp->zp_dedup = B_FALSE; |
accd6d9d | 3277 | BP_SET_DEDUP(bp, B_FALSE); |
428870ff | 3278 | } |
accd6d9d | 3279 | ASSERT(!BP_GET_DEDUP(bp)); |
428870ff BB |
3280 | zio->io_pipeline = ZIO_WRITE_PIPELINE; |
3281 | ddt_exit(ddt); | |
62840030 | 3282 | return (zio); |
428870ff BB |
3283 | } |
3284 | ||
428870ff BB |
3285 | if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { |
3286 | if (ddp->ddp_phys_birth != 0) | |
3287 | ddt_bp_fill(ddp, bp, txg); | |
3288 | if (dde->dde_lead_zio[p] != NULL) | |
3289 | zio_add_child(zio, dde->dde_lead_zio[p]); | |
3290 | else | |
3291 | ddt_phys_addref(ddp); | |
3292 | } else if (zio->io_bp_override) { | |
3293 | ASSERT(bp->blk_birth == txg); | |
3294 | ASSERT(BP_EQUAL(bp, zio->io_bp_override)); | |
3295 | ddt_phys_fill(ddp, bp); | |
3296 | ddt_phys_addref(ddp); | |
3297 | } else { | |
a6255b7f | 3298 | cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd, |
2aa34383 | 3299 | zio->io_orig_size, zio->io_orig_size, zp, |
bc77ba73 | 3300 | zio_ddt_child_write_ready, NULL, NULL, |
428870ff BB |
3301 | zio_ddt_child_write_done, dde, zio->io_priority, |
3302 | ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); | |
3303 | ||
a6255b7f | 3304 | zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL); |
428870ff BB |
3305 | dde->dde_lead_zio[p] = cio; |
3306 | } | |
3307 | ||
3308 | ddt_exit(ddt); | |
3309 | ||
9cdf7b1f | 3310 | zio_nowait(cio); |
428870ff | 3311 | |
62840030 | 3312 | return (zio); |
428870ff BB |
3313 | } |
3314 | ||
3315 | ddt_entry_t *freedde; /* for debugging */ | |
b128c09f | 3316 | |
62840030 | 3317 | static zio_t * |
428870ff BB |
3318 | zio_ddt_free(zio_t *zio) |
3319 | { | |
3320 | spa_t *spa = zio->io_spa; | |
3321 | blkptr_t *bp = zio->io_bp; | |
3322 | ddt_t *ddt = ddt_select(spa, bp); | |
3323 | ddt_entry_t *dde; | |
3324 | ddt_phys_t *ddp; | |
3325 | ||
3326 | ASSERT(BP_GET_DEDUP(bp)); | |
3327 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
3328 | ||
3329 | ddt_enter(ddt); | |
3330 | freedde = dde = ddt_lookup(ddt, bp, B_TRUE); | |
5dc6af0e BB |
3331 | if (dde) { |
3332 | ddp = ddt_phys_select(dde, bp); | |
3333 | if (ddp) | |
3334 | ddt_phys_decref(ddp); | |
3335 | } | |
428870ff BB |
3336 | ddt_exit(ddt); |
3337 | ||
62840030 | 3338 | return (zio); |
428870ff BB |
3339 | } |
3340 | ||
3341 | /* | |
3342 | * ========================================================================== | |
3343 | * Allocate and free blocks | |
3344 | * ========================================================================== | |
3345 | */ | |
3dfb57a3 DB |
3346 | |
3347 | static zio_t * | |
492f64e9 | 3348 | zio_io_to_allocate(spa_t *spa, int allocator) |
3dfb57a3 DB |
3349 | { |
3350 | zio_t *zio; | |
3351 | ||
492f64e9 | 3352 | ASSERT(MUTEX_HELD(&spa->spa_alloc_locks[allocator])); |
3dfb57a3 | 3353 | |
492f64e9 | 3354 | zio = avl_first(&spa->spa_alloc_trees[allocator]); |
3dfb57a3 DB |
3355 | if (zio == NULL) |
3356 | return (NULL); | |
3357 | ||
3358 | ASSERT(IO_IS_ALLOCATING(zio)); | |
3359 | ||
3360 | /* | |
3361 | * Try to place a reservation for this zio. If we're unable to | |
3362 | * reserve then we throttle. | |
3363 | */ | |
492f64e9 | 3364 | ASSERT3U(zio->io_allocator, ==, allocator); |
cc99f275 | 3365 | if (!metaslab_class_throttle_reserve(zio->io_metaslab_class, |
492f64e9 | 3366 | zio->io_prop.zp_copies, zio->io_allocator, zio, 0)) { |
3dfb57a3 DB |
3367 | return (NULL); |
3368 | } | |
3369 | ||
492f64e9 | 3370 | avl_remove(&spa->spa_alloc_trees[allocator], zio); |
3dfb57a3 DB |
3371 | ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE); |
3372 | ||
3373 | return (zio); | |
3374 | } | |
3375 | ||
62840030 | 3376 | static zio_t * |
3dfb57a3 DB |
3377 | zio_dva_throttle(zio_t *zio) |
3378 | { | |
3379 | spa_t *spa = zio->io_spa; | |
3380 | zio_t *nio; | |
cc99f275 DB |
3381 | metaslab_class_t *mc; |
3382 | ||
3383 | /* locate an appropriate allocation class */ | |
3384 | mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type, | |
3385 | zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk); | |
3dfb57a3 DB |
3386 | |
3387 | if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE || | |
cc99f275 | 3388 | !mc->mc_alloc_throttle_enabled || |
3dfb57a3 DB |
3389 | zio->io_child_type == ZIO_CHILD_GANG || |
3390 | zio->io_flags & ZIO_FLAG_NODATA) { | |
62840030 | 3391 | return (zio); |
3dfb57a3 DB |
3392 | } |
3393 | ||
3394 | ASSERT(zio->io_child_type > ZIO_CHILD_GANG); | |
3395 | ||
3396 | ASSERT3U(zio->io_queued_timestamp, >, 0); | |
3397 | ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE); | |
3398 | ||
492f64e9 PD |
3399 | zbookmark_phys_t *bm = &zio->io_bookmark; |
3400 | /* | |
3401 | * We want to try to use as many allocators as possible to help improve | |
3402 | * performance, but we also want logically adjacent IOs to be physically | |
3403 | * adjacent to improve sequential read performance. We chunk each object | |
3404 | * into 2^20 block regions, and then hash based on the objset, object, | |
3405 | * level, and region to accomplish both of these goals. | |
3406 | */ | |
3407 | zio->io_allocator = cityhash4(bm->zb_objset, bm->zb_object, | |
3408 | bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count; | |
3409 | mutex_enter(&spa->spa_alloc_locks[zio->io_allocator]); | |
3dfb57a3 | 3410 | ASSERT(zio->io_type == ZIO_TYPE_WRITE); |
cc99f275 | 3411 | zio->io_metaslab_class = mc; |
492f64e9 | 3412 | avl_add(&spa->spa_alloc_trees[zio->io_allocator], zio); |
cc99f275 | 3413 | nio = zio_io_to_allocate(spa, zio->io_allocator); |
492f64e9 | 3414 | mutex_exit(&spa->spa_alloc_locks[zio->io_allocator]); |
62840030 | 3415 | return (nio); |
3dfb57a3 DB |
3416 | } |
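/*
 * Illustrative sketch (hypothetical helper, not used by the pipeline):
 * the allocator selection done inline above.  Bookmarks that fall in the
 * same 2^20-block region of the same object hash to the same allocator,
 * keeping logically adjacent writes together, while other objects and
 * regions spread across all spa_alloc_count allocators.
 */
static int
zio_select_allocator(const zbookmark_phys_t *bm, int alloc_count)
{
	return ((int)(cityhash4(bm->zb_objset, bm->zb_object,
	    bm->zb_level, bm->zb_blkid >> 20) % alloc_count));
}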
3417 | ||
cc99f275 | 3418 | static void |
492f64e9 | 3419 | zio_allocate_dispatch(spa_t *spa, int allocator) |
3dfb57a3 DB |
3420 | { |
3421 | zio_t *zio; | |
3422 | ||
492f64e9 PD |
3423 | mutex_enter(&spa->spa_alloc_locks[allocator]); |
3424 | zio = zio_io_to_allocate(spa, allocator); | |
3425 | mutex_exit(&spa->spa_alloc_locks[allocator]); | |
3dfb57a3 DB |
3426 | if (zio == NULL) |
3427 | return; | |
3428 | ||
3429 | ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE); | |
3430 | ASSERT0(zio->io_error); | |
3431 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE); | |
3432 | } | |
3433 | ||
62840030 | 3434 | static zio_t * |
34dc7c2f BB |
3435 | zio_dva_allocate(zio_t *zio) |
3436 | { | |
3437 | spa_t *spa = zio->io_spa; | |
cc99f275 | 3438 | metaslab_class_t *mc; |
34dc7c2f BB |
3439 | blkptr_t *bp = zio->io_bp; |
3440 | int error; | |
6d974228 | 3441 | int flags = 0; |
34dc7c2f | 3442 | |
9babb374 BB |
3443 | if (zio->io_gang_leader == NULL) { |
3444 | ASSERT(zio->io_child_type > ZIO_CHILD_GANG); | |
3445 | zio->io_gang_leader = zio; | |
3446 | } | |
3447 | ||
34dc7c2f | 3448 | ASSERT(BP_IS_HOLE(bp)); |
c99c9001 | 3449 | ASSERT0(BP_GET_NDVAS(bp)); |
428870ff BB |
3450 | ASSERT3U(zio->io_prop.zp_copies, >, 0); |
3451 | ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); | |
34dc7c2f BB |
3452 | ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); |
3453 | ||
920dd524 | 3454 | flags |= (zio->io_flags & ZIO_FLAG_FASTWRITE) ? METASLAB_FASTWRITE : 0; |
3dfb57a3 DB |
3455 | if (zio->io_flags & ZIO_FLAG_NODATA) |
3456 | flags |= METASLAB_DONT_THROTTLE; | |
3457 | if (zio->io_flags & ZIO_FLAG_GANG_CHILD) | |
3458 | flags |= METASLAB_GANG_CHILD; | |
3459 | if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE) | |
3460 | flags |= METASLAB_ASYNC_ALLOC; | |
3461 | ||
cc99f275 DB |
3462 | /* |
3463 | * if not already chosen, locate an appropriate allocation class | |
3464 | */ | |
3465 | mc = zio->io_metaslab_class; | |
3466 | if (mc == NULL) { | |
3467 | mc = spa_preferred_class(spa, zio->io_size, | |
3468 | zio->io_prop.zp_type, zio->io_prop.zp_level, | |
3469 | zio->io_prop.zp_zpl_smallblk); | |
3470 | zio->io_metaslab_class = mc; | |
3471 | } | |
3472 | ||
aa755b35 MA |
3473 | /* |
3474 | * Try allocating the block in the usual metaslab class. | |
3475 | * If that's full, allocate it in the normal class. | |
3476 | * If that's full, allocate as a gang block, | |
3477 | * and if all are full, the allocation fails (which shouldn't happen). | |
3478 | * | |
3479 | * Note that we do not fall back on embedded slog (ZIL) space, to | |
3480 | * preserve unfragmented slog space, which is critical for decent | |
3481 | * sync write performance. If a log allocation fails, we will fall | |
3482 | * back to spa_sync() which is abysmal for performance. | |
3483 | */ | |
b128c09f | 3484 | error = metaslab_alloc(spa, mc, zio->io_size, bp, |
4e21fd06 | 3485 | zio->io_prop.zp_copies, zio->io_txg, NULL, flags, |
492f64e9 | 3486 | &zio->io_alloc_list, zio, zio->io_allocator); |
34dc7c2f | 3487 | |
cc99f275 DB |
3488 | /* |
3489 | * Fall back to the normal class when an allocation class is full. |
3490 | */ | |
3491 | if (error == ENOSPC && mc != spa_normal_class(spa)) { | |
3492 | /* | |
3493 | * If throttling, transfer reservation over to normal class. | |
3494 | * The io_allocator slot can remain the same even though we | |
3495 | * are switching classes. | |
3496 | */ | |
3497 | if (mc->mc_alloc_throttle_enabled && | |
3498 | (zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) { | |
3499 | metaslab_class_throttle_unreserve(mc, | |
3500 | zio->io_prop.zp_copies, zio->io_allocator, zio); | |
3501 | zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING; | |
3502 | ||
aa755b35 MA |
3503 | VERIFY(metaslab_class_throttle_reserve( |
3504 | spa_normal_class(spa), | |
cc99f275 DB |
3505 | zio->io_prop.zp_copies, zio->io_allocator, zio, |
3506 | flags | METASLAB_MUST_RESERVE)); | |
cc99f275 | 3507 | } |
aa755b35 MA |
3508 | zio->io_metaslab_class = mc = spa_normal_class(spa); |
3509 | if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) { | |
3510 | zfs_dbgmsg("%s: metaslab allocation failure, " | |
3511 | "trying normal class: zio %px, size %llu, error %d", | |
3512 | spa_name(spa), zio, zio->io_size, error); | |
3513 | } | |
cc99f275 DB |
3514 | |
3515 | error = metaslab_alloc(spa, mc, zio->io_size, bp, | |
3516 | zio->io_prop.zp_copies, zio->io_txg, NULL, flags, | |
3517 | &zio->io_alloc_list, zio, zio->io_allocator); | |
3518 | } | |
3519 | ||
aa755b35 MA |
3520 | if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) { |
3521 | if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) { | |
3522 | zfs_dbgmsg("%s: metaslab allocation failure, " | |
3523 | "trying ganging: zio %px, size %llu, error %d", | |
3524 | spa_name(spa), zio, zio->io_size, error); | |
3525 | } | |
3526 | return (zio_write_gang_block(zio, mc)); | |
3527 | } | |
3dfb57a3 | 3528 | if (error != 0) { |
aa755b35 MA |
3529 | if (error != ENOSPC || |
3530 | (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) { | |
3531 | zfs_dbgmsg("%s: metaslab allocation failure: zio %px, " | |
3532 | "size %llu, error %d", | |
3533 | spa_name(spa), zio, zio->io_size, error); | |
3534 | } | |
34dc7c2f BB |
3535 | zio->io_error = error; |
3536 | } | |
3537 | ||
62840030 | 3538 | return (zio); |
34dc7c2f BB |
3539 | } |
3540 | ||
62840030 | 3541 | static zio_t * |
34dc7c2f BB |
3542 | zio_dva_free(zio_t *zio) |
3543 | { | |
b128c09f | 3544 | metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); |
34dc7c2f | 3545 | |
62840030 | 3546 | return (zio); |
34dc7c2f BB |
3547 | } |
3548 | ||
62840030 | 3549 | static zio_t * |
34dc7c2f BB |
3550 | zio_dva_claim(zio_t *zio) |
3551 | { | |
b128c09f BB |
3552 | int error; |
3553 | ||
3554 | error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); | |
3555 | if (error) | |
3556 | zio->io_error = error; | |
34dc7c2f | 3557 | |
62840030 | 3558 | return (zio); |
34dc7c2f BB |
3559 | } |
3560 | ||
b128c09f BB |
3561 | /* |
3562 | * Undo an allocation. This is used by zio_done() when an I/O fails | |
3563 | * and we want to give back the block we just allocated. | |
3564 | * This handles both normal blocks and gang blocks. | |
3565 | */ | |
3566 | static void | |
3567 | zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) | |
3568 | { | |
b128c09f | 3569 | ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); |
428870ff | 3570 | ASSERT(zio->io_bp_override == NULL); |
b128c09f BB |
3571 | |
3572 | if (!BP_IS_HOLE(bp)) | |
428870ff | 3573 | metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); |
b128c09f BB |
3574 | |
3575 | if (gn != NULL) { | |
1c27024e | 3576 | for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { |
b128c09f BB |
3577 | zio_dva_unallocate(zio, gn->gn_child[g], |
3578 | &gn->gn_gbh->zg_blkptr[g]); | |
3579 | } | |
3580 | } | |
3581 | } | |
3582 | ||
3583 | /* | |
3584 | * Try to allocate an intent log block. Return 0 on success, errno on failure. | |
3585 | */ | |
3586 | int | |
b5256303 TC |
3587 | zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp, |
3588 | uint64_t size, boolean_t *slog) | |
b128c09f | 3589 | { |
428870ff | 3590 | int error = 1; |
4e21fd06 | 3591 | zio_alloc_list_t io_alloc_list; |
b128c09f | 3592 | |
428870ff BB |
3593 | ASSERT(txg > spa_syncing_txg(spa)); |
3594 | ||
4e21fd06 | 3595 | metaslab_trace_init(&io_alloc_list); |
cc99f275 DB |
3596 | |
3597 | /* | |
3598 | * Block pointer fields are useful to metaslabs for stats and debugging. | |
3599 | * Fill in the obvious ones before calling into metaslab_alloc(). | |
3600 | */ | |
3601 | BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); | |
3602 | BP_SET_PSIZE(new_bp, size); | |
3603 | BP_SET_LEVEL(new_bp, 0); | |
3604 | ||
492f64e9 PD |
3605 | /* |
3606 | * When allocating a zil block, we don't have information about | |
3607 | * the final destination of the block except the objset it's part | |
3608 | * of, so we just hash the objset ID to pick the allocator to get | |
3609 | * some parallelism. | |
3610 | */ | |
be5c6d96 MA |
3611 | int flags = METASLAB_FASTWRITE | METASLAB_ZIL; |
3612 | int allocator = cityhash4(0, 0, 0, os->os_dsl_dataset->ds_object) % | |
3613 | spa->spa_alloc_count; | |
aa755b35 MA |
3614 | error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1, |
3615 | txg, NULL, flags, &io_alloc_list, NULL, allocator); | |
3616 | *slog = (error == 0); | |
3617 | if (error != 0) { | |
3618 | error = metaslab_alloc(spa, spa_embedded_log_class(spa), size, | |
3619 | new_bp, 1, txg, NULL, flags, | |
3620 | &io_alloc_list, NULL, allocator); | |
3621 | } | |
3622 | if (error != 0) { | |
3623 | error = metaslab_alloc(spa, spa_normal_class(spa), size, | |
3624 | new_bp, 1, txg, NULL, flags, | |
3625 | &io_alloc_list, NULL, allocator); | |
ebf8e3a2 | 3626 | } |
4e21fd06 | 3627 | metaslab_trace_fini(&io_alloc_list); |
b128c09f BB |
3628 | |
3629 | if (error == 0) { | |
3630 | BP_SET_LSIZE(new_bp, size); | |
3631 | BP_SET_PSIZE(new_bp, size); | |
3632 | BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); | |
428870ff BB |
3633 | BP_SET_CHECKSUM(new_bp, |
3634 | spa_version(spa) >= SPA_VERSION_SLIM_ZIL | |
3635 | ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); | |
b128c09f BB |
3636 | BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); |
3637 | BP_SET_LEVEL(new_bp, 0); | |
428870ff | 3638 | BP_SET_DEDUP(new_bp, 0); |
b128c09f | 3639 | BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); |
b5256303 TC |
3640 | |
3641 | /* | |
3642 | * encrypted blocks will require an IV and salt. We generate | |
3643 | * these now since we will not be rewriting the bp at | |
3644 | * rewrite time. | |
3645 | */ | |
3646 | if (os->os_encrypted) { | |
3647 | uint8_t iv[ZIO_DATA_IV_LEN]; | |
3648 | uint8_t salt[ZIO_DATA_SALT_LEN]; | |
3649 | ||
3650 | BP_SET_CRYPT(new_bp, B_TRUE); | |
3651 | VERIFY0(spa_crypt_get_salt(spa, | |
3652 | dmu_objset_id(os), salt)); | |
3653 | VERIFY0(zio_crypt_generate_iv(iv)); | |
3654 | ||
3655 | zio_crypt_encode_params_bp(new_bp, salt, iv); | |
3656 | } | |
1ce23dca PS |
3657 | } else { |
3658 | zfs_dbgmsg("%s: zil block allocation failure: " | |
3659 | "size %llu, error %d", spa_name(spa), size, error); | |
b128c09f BB |
3660 | } |
3661 | ||
3662 | return (error); | |
3663 | } | |
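/*
 * Note on the fallback order above: the dedicated log (slog) class is
 * tried first, then the embedded log class carved out of normal vdevs,
 * and finally the normal class itself.  *slog reflects only the first
 * attempt, so callers learn whether the block actually landed in the
 * dedicated log class.
 */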
3664 | ||
34dc7c2f BB |
3665 | /* |
3666 | * ========================================================================== | |
3667 | * Read and write to physical devices | |
3668 | * ========================================================================== | |
3669 | */ | |
98b25418 | 3670 | |
98b25418 GW |
3671 | /* |
3672 | * Issue an I/O to the underlying vdev. Typically the issue pipeline | |
3673 | * stops after this stage and will resume upon I/O completion. | |
3674 | * However, there are instances where the vdev layer may need to | |
3675 | * continue the pipeline when an I/O was not issued. Since the I/O | |
3676 | * that was sent to the vdev layer might be different than the one | |
3677 | * currently active in the pipeline (see vdev_queue_io()), we explicitly | |
3678 | * force the underlying vdev layers to call either zio_execute() or | |
3679 | * zio_interrupt() to ensure that the pipeline continues with the correct I/O. | |
3680 | */ | |
62840030 | 3681 | static zio_t * |
34dc7c2f BB |
3682 | zio_vdev_io_start(zio_t *zio) |
3683 | { | |
3684 | vdev_t *vd = zio->io_vd; | |
34dc7c2f BB |
3685 | uint64_t align; |
3686 | spa_t *spa = zio->io_spa; | |
3687 | ||
193a37cb TH |
3688 | zio->io_delay = 0; |
3689 | ||
b128c09f BB |
3690 | ASSERT(zio->io_error == 0); |
3691 | ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); | |
34dc7c2f | 3692 | |
b128c09f BB |
3693 | if (vd == NULL) { |
3694 | if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) | |
3695 | spa_config_enter(spa, SCL_ZIO, zio, RW_READER); | |
34dc7c2f | 3696 | |
b128c09f BB |
3697 | /* |
3698 | * The mirror_ops handle multiple DVAs in a single BP. | |
3699 | */ | |
98b25418 | 3700 | vdev_mirror_ops.vdev_op_io_start(zio); |
62840030 | 3701 | return (NULL); |
34dc7c2f BB |
3702 | } |
3703 | ||
3dfb57a3 | 3704 | ASSERT3P(zio->io_logical, !=, zio); |
6cb8e530 PZ |
3705 | if (zio->io_type == ZIO_TYPE_WRITE) { |
3706 | ASSERT(spa->spa_trust_config); | |
3707 | ||
a1d477c2 MA |
3708 | /* |
3709 | * Note: the code can handle other kinds of writes, | |
3710 | * but we don't expect them. | |
3711 | */ | |
6cb8e530 PZ |
3712 | if (zio->io_vd->vdev_removing) { |
3713 | ASSERT(zio->io_flags & | |
3714 | (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL | | |
3715 | ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)); | |
3716 | } | |
a1d477c2 | 3717 | } |
3dfb57a3 | 3718 | |
b128c09f BB |
3719 | align = 1ULL << vd->vdev_top->vdev_ashift; |
3720 | ||
b02fe35d AR |
3721 | if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && |
3722 | P2PHASE(zio->io_size, align) != 0) { | |
3723 | /* Transform logical writes to be a full physical block size. */ | |
34dc7c2f | 3724 | uint64_t asize = P2ROUNDUP(zio->io_size, align); |
a6255b7f | 3725 | abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize); |
178e73b3 | 3726 | ASSERT(vd == vd->vdev_top); |
34dc7c2f | 3727 | if (zio->io_type == ZIO_TYPE_WRITE) { |
a6255b7f DQ |
3728 | abd_copy(abuf, zio->io_abd, zio->io_size); |
3729 | abd_zero_off(abuf, zio->io_size, asize - zio->io_size); | |
34dc7c2f | 3730 | } |
b128c09f | 3731 | zio_push_transform(zio, abuf, asize, asize, zio_subblock); |
34dc7c2f BB |
3732 | } |
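/*
 * Worked example of the padding transform above (illustrative numbers):
 * with vdev_ashift == 12, align == 4096.  A 2560-byte logical write has
 * P2PHASE(2560, 4096) != 0, so asize = P2ROUNDUP(2560, 4096) == 4096;
 * the first 2560 bytes of abuf are copied from io_abd and the remaining
 * 1536 bytes are zeroed before the padded buffer is pushed as a
 * transform.
 */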
3733 | ||
b02fe35d AR |
3734 | /* |
3735 | * If this is not a physical io, make sure that it is properly aligned | |
3736 | * before proceeding. | |
3737 | */ | |
3738 | if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { | |
3739 | ASSERT0(P2PHASE(zio->io_offset, align)); | |
3740 | ASSERT0(P2PHASE(zio->io_size, align)); | |
3741 | } else { | |
3742 | /* | |
3743 | * For physical writes, we allow 512b aligned writes and assume | |
3744 | * the device will perform a read-modify-write as necessary. | |
3745 | */ | |
3746 | ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); | |
3747 | ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); | |
3748 | } | |
3749 | ||
572e2857 | 3750 | VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); |
fb5f0bc8 BB |
3751 | |
3752 | /* | |
3753 | * If this is a repair I/O, and there's no self-healing involved -- | |
3754 | * that is, we're just resilvering what we expect to resilver -- | |
3755 | * then don't do the I/O unless zio's txg is actually in vd's DTL. | |
9e052db4 MA |
3756 | * This prevents spurious resilvering. |
3757 | * | |
3758 | * There are a few ways that we can end up creating these spurious | |
3759 | * resilver i/os: | |
3760 | * | |
3761 | * 1. A resilver i/o will be issued if any DVA in the BP has a | |
3762 | * dirty DTL. The mirror code will issue resilver writes to | |
3763 | * each DVA, including the one(s) that are not on vdevs with dirty | |
3764 | * DTLs. | |
3765 | * | |
3766 | * 2. With nested replication, which happens when we have a | |
3767 | * "replacing" or "spare" vdev that's a child of a mirror or raidz. | |
3768 | * For example, given mirror(replacing(A+B), C), it's likely that | |
3769 | * only A is out of date (it's the new device). In this case, we'll | |
3770 | * read from C, then use the data to resilver A+B -- but we don't | |
3771 | * actually want to resilver B, just A. The top-level mirror has no | |
3772 | * way to know this, so instead we just discard unnecessary repairs | |
3773 | * as we work our way down the vdev tree. | |
3774 | * | |
3775 | * 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc. | |
3776 | * The same logic applies to any form of nested replication: ditto | |
3777 | * + mirror, RAID-Z + replacing, etc. | |
3778 | * | |
3779 | * However, indirect vdevs point off to other vdevs which may have | |
3780 | * DTL's, so we never bypass them. The child i/os on concrete vdevs | |
3781 | * will be properly bypassed instead. | |
b2255edc BB |
3782 | * |
3783 | * Leaf DTL_PARTIAL can be empty when a legitimate write comes from | |
3784 | * a dRAID spare vdev. For example, when a dRAID spare is first | |
3785 | * used, its spare blocks need to be written to but the leaf vdev's | |
3786 | * of such blocks can have empty DTL_PARTIAL. | |
3787 | * | |
3788 | * There seemed no clean way to allow such writes while bypassing | |
3789 | * spurious ones. At this point, just avoid all bypassing for dRAID | |
3790 | * for correctness. | |
fb5f0bc8 BB |
3791 | */ |
3792 | if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && | |
3793 | !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && | |
3794 | zio->io_txg != 0 && /* not a delegated i/o */ | |
9e052db4 | 3795 | vd->vdev_ops != &vdev_indirect_ops && |
b2255edc | 3796 | vd->vdev_top->vdev_ops != &vdev_draid_ops && |
fb5f0bc8 BB |
3797 | !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { |
3798 | ASSERT(zio->io_type == ZIO_TYPE_WRITE); | |
fb5f0bc8 | 3799 | zio_vdev_io_bypass(zio); |
62840030 | 3800 | return (zio); |
fb5f0bc8 | 3801 | } |
34dc7c2f | 3802 | |
b2255edc BB |
3803 | /* |
3804 | * Select the next best leaf I/O to process. Distributed spares are | |
3805 | * excluded since they dispatch the I/O directly to a leaf vdev after | |
3806 | * applying the dRAID mapping. | |
3807 | */ | |
3808 | if (vd->vdev_ops->vdev_op_leaf && | |
3809 | vd->vdev_ops != &vdev_draid_spare_ops && | |
3810 | (zio->io_type == ZIO_TYPE_READ || | |
3811 | zio->io_type == ZIO_TYPE_WRITE || | |
3812 | zio->io_type == ZIO_TYPE_TRIM)) { | |
b128c09f | 3813 | |
b0bc7a84 | 3814 | if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio)) |
62840030 | 3815 | return (zio); |
b128c09f BB |
3816 | |
3817 | if ((zio = vdev_queue_io(zio)) == NULL) | |
62840030 | 3818 | return (NULL); |
b128c09f BB |
3819 | |
3820 | if (!vdev_accessible(vd, zio)) { | |
2e528b49 | 3821 | zio->io_error = SET_ERROR(ENXIO); |
b128c09f | 3822 | zio_interrupt(zio); |
62840030 | 3823 | return (NULL); |
b128c09f | 3824 | } |
67103816 | 3825 | zio->io_delay = gethrtime(); |
b128c09f BB |
3826 | } |
3827 | ||
98b25418 | 3828 | vd->vdev_ops->vdev_op_io_start(zio); |
62840030 | 3829 | return (NULL); |
34dc7c2f BB |
3830 | } |
3831 | ||
62840030 | 3832 | static zio_t * |
34dc7c2f BB |
3833 | zio_vdev_io_done(zio_t *zio) |
3834 | { | |
b128c09f BB |
3835 | vdev_t *vd = zio->io_vd; |
3836 | vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; | |
3837 | boolean_t unexpected_error = B_FALSE; | |
34dc7c2f | 3838 | |
ddc751d5 | 3839 | if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { |
62840030 | 3840 | return (NULL); |
ddc751d5 | 3841 | } |
34dc7c2f | 3842 | |
1b939560 BB |
3843 | ASSERT(zio->io_type == ZIO_TYPE_READ || |
3844 | zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM); | |
b128c09f | 3845 | |
193a37cb TH |
3846 | if (zio->io_delay) |
3847 | zio->io_delay = gethrtime() - zio->io_delay; | |
3848 | ||
b2255edc BB |
3849 | if (vd != NULL && vd->vdev_ops->vdev_op_leaf && |
3850 | vd->vdev_ops != &vdev_draid_spare_ops) { | |
b128c09f BB |
3851 | vdev_queue_io_done(zio); |
3852 | ||
3853 | if (zio->io_type == ZIO_TYPE_WRITE) | |
3854 | vdev_cache_write(zio); | |
3855 | ||
3856 | if (zio_injection_enabled && zio->io_error == 0) | |
d977122d DB |
3857 | zio->io_error = zio_handle_device_injections(vd, zio, |
3858 | EIO, EILSEQ); | |
b128c09f BB |
3859 | |
3860 | if (zio_injection_enabled && zio->io_error == 0) | |
3861 | zio->io_error = zio_handle_label_injection(zio, EIO); | |
3862 | ||
1b939560 | 3863 | if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) { |
b128c09f | 3864 | if (!vdev_accessible(vd, zio)) { |
2e528b49 | 3865 | zio->io_error = SET_ERROR(ENXIO); |
b128c09f BB |
3866 | } else { |
3867 | unexpected_error = B_TRUE; | |
3868 | } | |
3869 | } | |
3870 | } | |
3871 | ||
3872 | ops->vdev_op_io_done(zio); | |
34dc7c2f | 3873 | |
f43615d0 | 3874 | if (unexpected_error) |
d164b209 | 3875 | VERIFY(vdev_probe(vd, zio) == NULL); |
34dc7c2f | 3876 | |
62840030 | 3877 | return (zio); |
34dc7c2f BB |
3878 | } |
3879 | ||
a8b2e306 TC |
3880 | /* |
3881 | * This function is used to change the priority of an existing zio that is | |
3882 | * currently in-flight. This is used by the arc to upgrade priority in the | |
3883 | * event that a demand read is made for a block that is currently queued | |
3884 | * as a scrub or async read IO. Otherwise, the high priority read request | |
3885 | * would end up having to wait for the lower priority IO. | |
3886 | */ | |
3887 | void | |
3888 | zio_change_priority(zio_t *pio, zio_priority_t priority) | |
3889 | { | |
3890 | zio_t *cio, *cio_next; | |
3891 | zio_link_t *zl = NULL; | |
3892 | ||
3893 | ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE); | |
3894 | ||
3895 | if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) { | |
3896 | vdev_queue_change_io_priority(pio, priority); | |
3897 | } else { | |
3898 | pio->io_priority = priority; | |
3899 | } | |
3900 | ||
3901 | mutex_enter(&pio->io_lock); | |
3902 | for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) { | |
3903 | cio_next = zio_walk_children(pio, &zl); | |
3904 | zio_change_priority(cio, priority); | |
3905 | } | |
3906 | mutex_exit(&pio->io_lock); | |
3907 | } | |
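/*
 * Usage sketch (illustrative only): when the ARC discovers that a demand
 * read maps onto a zio it already has in flight at scrub or async
 * priority, it can promote that zio instead of waiting behind
 * lower-priority I/O:
 *
 *	zio_change_priority(queued_read_zio, ZIO_PRIORITY_SYNC_READ);
 *
 * 'queued_read_zio' stands for whatever in-flight read the caller is
 * tracking; it is not a variable defined in this file.
 */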
3908 | ||
428870ff BB |
3909 | /* |
3910 | * For non-raidz ZIOs, we can just copy aside the bad data read from the | |
3911 | * disk, and use that to finish the checksum ereport later. | |
3912 | */ | |
3913 | static void | |
3914 | zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, | |
84c07ada | 3915 | const abd_t *good_buf) |
428870ff BB |
3916 | { |
3917 | /* no processing needed */ | |
3918 | zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); | |
3919 | } | |
3920 | ||
3921 | /*ARGSUSED*/ | |
3922 | void | |
330c6c05 | 3923 | zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr) |
428870ff | 3924 | { |
84c07ada | 3925 | void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size); |
428870ff | 3926 | |
84c07ada | 3927 | abd_copy(abd, zio->io_abd, zio->io_size); |
428870ff BB |
3928 | |
3929 | zcr->zcr_cbinfo = zio->io_size; | |
84c07ada | 3930 | zcr->zcr_cbdata = abd; |
428870ff | 3931 | zcr->zcr_finish = zio_vsd_default_cksum_finish; |
84c07ada | 3932 | zcr->zcr_free = zio_abd_free; |
428870ff BB |
3933 | } |
3934 | ||
62840030 | 3935 | static zio_t * |
34dc7c2f BB |
3936 | zio_vdev_io_assess(zio_t *zio) |
3937 | { | |
3938 | vdev_t *vd = zio->io_vd; | |
b128c09f | 3939 | |
ddc751d5 | 3940 | if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) { |
62840030 | 3941 | return (NULL); |
ddc751d5 | 3942 | } |
b128c09f BB |
3943 | |
3944 | if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) | |
3945 | spa_config_exit(zio->io_spa, SCL_ZIO, zio); | |
3946 | ||
3947 | if (zio->io_vsd != NULL) { | |
428870ff | 3948 | zio->io_vsd_ops->vsd_free(zio); |
b128c09f | 3949 | zio->io_vsd = NULL; |
34dc7c2f BB |
3950 | } |
3951 | ||
b128c09f | 3952 | if (zio_injection_enabled && zio->io_error == 0) |
34dc7c2f BB |
3953 | zio->io_error = zio_handle_fault_injection(zio, EIO); |
3954 | ||
3955 | /* | |
3956 | * If the I/O failed, determine whether we should attempt to retry it. | |
428870ff BB |
3957 | * |
3958 | * On retry, we cut in line in the issue queue, since we don't want | |
3959 | * compression/checksumming/etc. work to prevent our (cheap) IO reissue. | |
34dc7c2f | 3960 | */ |
b128c09f BB |
3961 | if (zio->io_error && vd == NULL && |
3962 | !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { | |
3963 | ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ | |
3964 | ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ | |
34dc7c2f | 3965 | zio->io_error = 0; |
b128c09f BB |
3966 | zio->io_flags |= ZIO_FLAG_IO_RETRY | |
3967 | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; | |
428870ff BB |
3968 | zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; |
3969 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, | |
3970 | zio_requeue_io_start_cut_in_line); | |
62840030 | 3971 | return (NULL); |
34dc7c2f BB |
3972 | } |
3973 | ||
b128c09f BB |
3974 | /* |
3975 | * If we got an error on a leaf device, convert it to ENXIO | |
3976 | * if the device is not accessible at all. | |
3977 | */ | |
3978 | if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && | |
3979 | !vdev_accessible(vd, zio)) | |
2e528b49 | 3980 | zio->io_error = SET_ERROR(ENXIO); |
b128c09f BB |
3981 | |
3982 | /* | |
3983 | * If we can't write to an interior vdev (mirror or RAID-Z), | |
3984 | * set vdev_cant_write so that we stop trying to allocate from it. | |
3985 | */ | |
3986 | if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && | |
13fe0198 | 3987 | vd != NULL && !vd->vdev_ops->vdev_op_leaf) { |
2b56a634 MA |
3988 | vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting " |
3989 | "cant_write=TRUE due to write failure with ENXIO", | |
3990 | zio); | |
b128c09f | 3991 | vd->vdev_cant_write = B_TRUE; |
13fe0198 | 3992 | } |
b128c09f | 3993 | |
298ec40b GM |
3994 | /* |
3995 | * If a cache flush returns ENOTSUP or ENOTTY, we know that no future | |
1b939560 BB |
3996 | * attempts will ever succeed. In this case we set a persistent |
3997 | * boolean flag so that we don't bother with it in the future. | |
298ec40b GM |
3998 | */ |
3999 | if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) && | |
4000 | zio->io_type == ZIO_TYPE_IOCTL && | |
4001 | zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL) | |
4002 | vd->vdev_nowritecache = B_TRUE; | |
4003 | ||
b128c09f BB |
4004 | if (zio->io_error) |
4005 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; | |
4006 | ||
e8b96c60 MA |
4007 | if (vd != NULL && vd->vdev_ops->vdev_op_leaf && |
4008 | zio->io_physdone != NULL) { | |
4009 | ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED)); | |
4010 | ASSERT(zio->io_child_type == ZIO_CHILD_VDEV); | |
4011 | zio->io_physdone(zio->io_logical); | |
4012 | } | |
4013 | ||
62840030 | 4014 | return (zio); |
34dc7c2f BB |
4015 | } |
4016 | ||
4017 | void | |
4018 | zio_vdev_io_reissue(zio_t *zio) | |
4019 | { | |
4020 | ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); | |
4021 | ASSERT(zio->io_error == 0); | |
4022 | ||
428870ff | 4023 | zio->io_stage >>= 1; |
34dc7c2f BB |
4024 | } |
4025 | ||
4026 | void | |
4027 | zio_vdev_io_redone(zio_t *zio) | |
4028 | { | |
4029 | ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); | |
4030 | ||
428870ff | 4031 | zio->io_stage >>= 1; |
34dc7c2f BB |
4032 | } |
4033 | ||
4034 | void | |
4035 | zio_vdev_io_bypass(zio_t *zio) | |
4036 | { | |
4037 | ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); | |
4038 | ASSERT(zio->io_error == 0); | |
4039 | ||
4040 | zio->io_flags |= ZIO_FLAG_IO_BYPASS; | |
428870ff | 4041 | zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; |
34dc7c2f BB |
4042 | } |
4043 | ||
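The three helpers above rewind the pipeline by manipulating the one-hot io_stage bit, so that the normal advance step lands on the intended stage the next time the zio executes. A minimal illustrative sketch of that advance step follows; it assumes the usual one-hot enum zio_stage encoding and is not the actual pipeline loop, only a restatement of the ">> 1" idiom:

static enum zio_stage
zio_next_stage_sketch(enum zio_stage cur, enum zio_stage pipeline)
{
	enum zio_stage stage = cur << 1;

	/* Skip over stage bits that this zio's pipeline does not include. */
	while ((stage & pipeline) == 0 && stage < ZIO_STAGE_DONE)
		stage <<= 1;

	/*
	 * After zio_vdev_io_reissue() has shifted io_stage right by one,
	 * this advance lands back on ZIO_STAGE_VDEV_IO_START, which is
	 * exactly the "reissue" effect; zio_vdev_io_bypass() likewise parks
	 * the zio just before ZIO_STAGE_VDEV_IO_ASSESS.
	 */
	return (stage);
}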
b5256303 TC |
4044 | /* |
4045 | * ========================================================================== | |
4046 | * Encrypt and store encryption parameters | |
4047 | * ========================================================================== | |
4048 | */ | |
4049 | ||
4050 | ||
4051 | /* | |
4052 | * This function is used for ZIO_STAGE_ENCRYPT. It is responsible for | |
4053 | * managing the storage of encryption parameters and passing them to the | |
4054 | * lower-level encryption functions. | |
4055 | */ | |
62840030 | 4056 | static zio_t * |
b5256303 TC |
4057 | zio_encrypt(zio_t *zio) |
4058 | { | |
4059 | zio_prop_t *zp = &zio->io_prop; | |
4060 | spa_t *spa = zio->io_spa; | |
4061 | blkptr_t *bp = zio->io_bp; | |
4062 | uint64_t psize = BP_GET_PSIZE(bp); | |
ae76f45c | 4063 | uint64_t dsobj = zio->io_bookmark.zb_objset; |
b5256303 TC |
4064 | dmu_object_type_t ot = BP_GET_TYPE(bp); |
4065 | void *enc_buf = NULL; | |
4066 | abd_t *eabd = NULL; | |
4067 | uint8_t salt[ZIO_DATA_SALT_LEN]; | |
4068 | uint8_t iv[ZIO_DATA_IV_LEN]; | |
4069 | uint8_t mac[ZIO_DATA_MAC_LEN]; | |
4070 | boolean_t no_crypt = B_FALSE; | |
4071 | ||
4072 | /* the root zio already encrypted the data */ | |
4073 | if (zio->io_child_type == ZIO_CHILD_GANG) | |
62840030 | 4074 | return (zio); |
b5256303 TC |
4075 | |
4076 | /* only ZIL blocks are re-encrypted on rewrite */ | |
4077 | if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG) | |
62840030 | 4078 | return (zio); |
b5256303 TC |
4079 | |
4080 | if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) { | |
4081 | BP_SET_CRYPT(bp, B_FALSE); | |
62840030 | 4082 | return (zio); |
b5256303 TC |
4083 | } |
4084 | ||
4085 | /* if we are doing raw encryption set the provided encryption params */ | |
4086 | if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) { | |
ae76f45c | 4087 | ASSERT0(BP_GET_LEVEL(bp)); |
b5256303 TC |
4088 | BP_SET_CRYPT(bp, B_TRUE); |
4089 | BP_SET_BYTEORDER(bp, zp->zp_byteorder); | |
4090 | if (ot != DMU_OT_OBJSET) | |
4091 | zio_crypt_encode_mac_bp(bp, zp->zp_mac); | |
ae76f45c TC |
4092 | |
4093 | /* dnode blocks must be written out in the provided byteorder */ | |
4094 | if (zp->zp_byteorder != ZFS_HOST_BYTEORDER && | |
4095 | ot == DMU_OT_DNODE) { | |
4096 | void *bswap_buf = zio_buf_alloc(psize); | |
4097 | abd_t *babd = abd_get_from_buf(bswap_buf, psize); | |
4098 | ||
4099 | ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); | |
4100 | abd_copy_to_buf(bswap_buf, zio->io_abd, psize); | |
4101 | dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf, | |
4102 | psize); | |
4103 | ||
4104 | abd_take_ownership_of_buf(babd, B_TRUE); | |
4105 | zio_push_transform(zio, babd, psize, psize, NULL); | |
4106 | } | |
4107 | ||
b5256303 TC |
4108 | if (DMU_OT_IS_ENCRYPTED(ot)) |
4109 | zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv); | |
62840030 | 4110 | return (zio); |
b5256303 TC |
4111 | } |
4112 | ||
4113 | /* indirect blocks only maintain a cksum of the lower level MACs */ | |
4114 | if (BP_GET_LEVEL(bp) > 0) { | |
4115 | BP_SET_CRYPT(bp, B_TRUE); | |
4116 | VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE, | |
4117 | zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp), | |
4118 | mac)); | |
4119 | zio_crypt_encode_mac_bp(bp, mac); | |
62840030 | 4120 | return (zio); |
b5256303 TC |
4121 | } |
4122 | ||
4123 | /* | |
4124 | * Objset blocks are a special case since they have 2 256-bit MACs | |
4125 | * embedded within them. | |
4126 | */ | |
4127 | if (ot == DMU_OT_OBJSET) { | |
4128 | ASSERT0(DMU_OT_IS_ENCRYPTED(ot)); | |
4129 | ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); | |
4130 | BP_SET_CRYPT(bp, B_TRUE); | |
ae76f45c TC |
4131 | VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj, |
4132 | zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp))); | |
62840030 | 4133 | return (zio); |
b5256303 TC |
4134 | } |
4135 | ||
4136 | /* unencrypted object types are only authenticated with a MAC */ | |
4137 | if (!DMU_OT_IS_ENCRYPTED(ot)) { | |
4138 | BP_SET_CRYPT(bp, B_TRUE); | |
ae76f45c TC |
4139 | VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj, |
4140 | zio->io_abd, psize, mac)); | |
b5256303 | 4141 | zio_crypt_encode_mac_bp(bp, mac); |
62840030 | 4142 | return (zio); |
b5256303 TC |
4143 | } |
4144 | ||
4145 | /* | |
4146 | * Later passes of sync-to-convergence may decide to rewrite data | |
4147 | * in place to avoid more disk reallocations. This presents a problem | |
d611989f | 4148 | * for encryption because this constitutes rewriting the new data with |
b5256303 TC |
4149 | * the same encryption key and IV. However, this only applies to blocks |
4150 | * in the MOS (particularly the spacemaps) and we do not encrypt the | |
4151 | * MOS. We assert that the zio is allocating or an intent log write | |
4152 | * to enforce this. | |
4153 | */ | |
4154 | ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG); | |
4155 | ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG); | |
4156 | ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION)); | |
4157 | ASSERT3U(psize, !=, 0); | |
4158 | ||
4159 | enc_buf = zio_buf_alloc(psize); | |
4160 | eabd = abd_get_from_buf(enc_buf, psize); | |
4161 | abd_take_ownership_of_buf(eabd, B_TRUE); | |
4162 | ||
4163 | /* | |
4164 | * For an explanation of what encryption parameters are stored | |
4165 | * where, see the block comment in zio_crypt.c. | |
4166 | */ | |
4167 | if (ot == DMU_OT_INTENT_LOG) { | |
4168 | zio_crypt_decode_params_bp(bp, salt, iv); | |
4169 | } else { | |
4170 | BP_SET_CRYPT(bp, B_TRUE); | |
4171 | } | |
4172 | ||
4173 | /* Perform the encryption. This should not fail */ | |
be9a5c35 TC |
4174 | VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark, |
4175 | BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), | |
4176 | salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt)); | |
b5256303 TC |
4177 | |
4178 | /* encode encryption metadata into the bp */ | |
4179 | if (ot == DMU_OT_INTENT_LOG) { | |
4180 | /* | |
4181 | * ZIL blocks store the MAC in the embedded checksum, so the | |
4182 | * transform must always be applied. | |
4183 | */ | |
4184 | zio_crypt_encode_mac_zil(enc_buf, mac); | |
4185 | zio_push_transform(zio, eabd, psize, psize, NULL); | |
4186 | } else { | |
4187 | BP_SET_CRYPT(bp, B_TRUE); | |
4188 | zio_crypt_encode_params_bp(bp, salt, iv); | |
4189 | zio_crypt_encode_mac_bp(bp, mac); | |
4190 | ||
4191 | if (no_crypt) { | |
4192 | ASSERT3U(ot, ==, DMU_OT_DNODE); | |
4193 | abd_free(eabd); | |
4194 | } else { | |
4195 | zio_push_transform(zio, eabd, psize, psize, NULL); | |
4196 | } | |
4197 | } | |
4198 | ||
62840030 | 4199 | return (zio); |
b5256303 TC |
4200 | } |
4201 | ||
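The branching in zio_encrypt() can be summarized as a small decision table. The sketch below is illustrative only: the enum and helper names are invented for this note, and it omits the early returns for gang children, non-allocating rewrites, and raw (already-encrypted) sends handled above.

typedef enum {
	ENC_SKIP,	/* dataset not encrypted: clear the crypt bit */
	ENC_INDIRECT,	/* level > 0: store a checksum of the child MACs */
	ENC_OBJSET,	/* objset_phys_t: update its two embedded MACs */
	ENC_MAC_ONLY,	/* unencrypted type: authenticate with a MAC only */
	ENC_FULL	/* encrypt the payload with salt/IV, record the MAC */
} enc_action_t;

static enc_action_t
zio_encrypt_action_sketch(dmu_object_type_t ot, int level,
    boolean_t dataset_encrypted)
{
	if (!dataset_encrypted)
		return (ENC_SKIP);
	if (level > 0)
		return (ENC_INDIRECT);
	if (ot == DMU_OT_OBJSET)
		return (ENC_OBJSET);
	if (!DMU_OT_IS_ENCRYPTED(ot))
		return (ENC_MAC_ONLY);
	return (ENC_FULL);
}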
34dc7c2f BB |
4202 | /* |
4203 | * ========================================================================== | |
4204 | * Generate and verify checksums | |
4205 | * ========================================================================== | |
4206 | */ | |
62840030 | 4207 | static zio_t * |
34dc7c2f BB |
4208 | zio_checksum_generate(zio_t *zio) |
4209 | { | |
34dc7c2f | 4210 | blkptr_t *bp = zio->io_bp; |
b128c09f | 4211 | enum zio_checksum checksum; |
34dc7c2f | 4212 | |
b128c09f BB |
4213 | if (bp == NULL) { |
4214 | /* | |
4215 | * This is zio_write_phys(). | |
4216 | * We're either generating a label checksum, or none at all. | |
4217 | */ | |
4218 | checksum = zio->io_prop.zp_checksum; | |
34dc7c2f | 4219 | |
b128c09f | 4220 | if (checksum == ZIO_CHECKSUM_OFF) |
62840030 | 4221 | return (zio); |
b128c09f BB |
4222 | |
4223 | ASSERT(checksum == ZIO_CHECKSUM_LABEL); | |
4224 | } else { | |
4225 | if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { | |
4226 | ASSERT(!IO_IS_ALLOCATING(zio)); | |
4227 | checksum = ZIO_CHECKSUM_GANG_HEADER; | |
4228 | } else { | |
4229 | checksum = BP_GET_CHECKSUM(bp); | |
4230 | } | |
4231 | } | |
34dc7c2f | 4232 | |
a6255b7f | 4233 | zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size); |
34dc7c2f | 4234 | |
62840030 | 4235 | return (zio); |
34dc7c2f BB |
4236 | } |
4237 | ||
62840030 | 4238 | static zio_t * |
b128c09f | 4239 | zio_checksum_verify(zio_t *zio) |
34dc7c2f | 4240 | { |
428870ff | 4241 | zio_bad_cksum_t info; |
b128c09f BB |
4242 | blkptr_t *bp = zio->io_bp; |
4243 | int error; | |
34dc7c2f | 4244 | |
428870ff BB |
4245 | ASSERT(zio->io_vd != NULL); |
4246 | ||
b128c09f BB |
4247 | if (bp == NULL) { |
4248 | /* | |
4249 | * This is zio_read_phys(). | |
4250 | * We're either verifying a label checksum, or nothing at all. | |
4251 | */ | |
4252 | if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) | |
62840030 | 4253 | return (zio); |
34dc7c2f | 4254 | |
b2255edc | 4255 | ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL); |
b128c09f | 4256 | } |
34dc7c2f | 4257 | |
428870ff | 4258 | if ((error = zio_checksum_error(zio, &info)) != 0) { |
b128c09f | 4259 | zio->io_error = error; |
7a3066ff MA |
4260 | if (error == ECKSUM && |
4261 | !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { | |
03e02e5b | 4262 | (void) zfs_ereport_start_checksum(zio->io_spa, |
b5256303 | 4263 | zio->io_vd, &zio->io_bookmark, zio, |
330c6c05 | 4264 | zio->io_offset, zio->io_size, &info); |
03e02e5b DB |
4265 | mutex_enter(&zio->io_vd->vdev_stat_lock); |
4266 | zio->io_vd->vdev_stat.vs_checksum_errors++; | |
4267 | mutex_exit(&zio->io_vd->vdev_stat_lock); | |
b128c09f | 4268 | } |
34dc7c2f BB |
4269 | } |
4270 | ||
62840030 | 4271 | return (zio); |
34dc7c2f BB |
4272 | } |
4273 | ||
4274 | /* | |
4275 | * Called by RAID-Z to ensure we don't compute the checksum twice. | |
4276 | */ | |
4277 | void | |
4278 | zio_checksum_verified(zio_t *zio) | |
4279 | { | |
428870ff | 4280 | zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; |
34dc7c2f BB |
4281 | } |
4282 | ||
4283 | /* | |
b128c09f BB |
4284 | * ========================================================================== |
4285 | * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other. |
9b67f605 | 4286 | * An error of 0 indicates success. ENXIO indicates whole-device failure, |
d611989f | 4287 | * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO |
b128c09f BB |
4288 | * indicate errors that are specific to one I/O, and most likely permanent. |
4289 | * Any other error is presumed to be worse because we weren't expecting it. | |
4290 | * ========================================================================== | |
34dc7c2f | 4291 | */ |
b128c09f BB |
4292 | int |
4293 | zio_worst_error(int e1, int e2) | |
34dc7c2f | 4294 | { |
b128c09f BB |
4295 | static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; |
4296 | int r1, r2; | |
4297 | ||
4298 | for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) | |
4299 | if (e1 == zio_error_rank[r1]) | |
4300 | break; | |
34dc7c2f | 4301 | |
b128c09f BB |
4302 | for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) |
4303 | if (e2 == zio_error_rank[r2]) | |
4304 | break; | |
4305 | ||
4306 | return (r1 > r2 ? e1 : e2); | |
34dc7c2f BB |
4307 | } |
4308 | ||
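A few concrete outcomes of the ranking above, written as assertions for illustration only (an error outside zio_error_rank[], such as EINVAL here, falls off the end of the table and therefore outranks EIO):

static void
zio_worst_error_example(void)
{
	VERIFY3S(zio_worst_error(0, ENXIO), ==, ENXIO);
	VERIFY3S(zio_worst_error(ENXIO, ECKSUM), ==, ECKSUM);
	VERIFY3S(zio_worst_error(ECKSUM, EIO), ==, EIO);
	VERIFY3S(zio_worst_error(EIO, EINVAL), ==, EINVAL);
}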
4309 | /* | |
4310 | * ========================================================================== | |
b128c09f | 4311 | * I/O completion |
34dc7c2f BB |
4312 | * ========================================================================== |
4313 | */ | |
62840030 | 4314 | static zio_t * |
b128c09f | 4315 | zio_ready(zio_t *zio) |
34dc7c2f | 4316 | { |
b128c09f | 4317 | blkptr_t *bp = zio->io_bp; |
d164b209 | 4318 | zio_t *pio, *pio_next; |
3dfb57a3 | 4319 | zio_link_t *zl = NULL; |
34dc7c2f | 4320 | |
ddc751d5 GW |
4321 | if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT, |
4322 | ZIO_WAIT_READY)) { | |
62840030 | 4323 | return (NULL); |
ddc751d5 | 4324 | } |
34dc7c2f | 4325 | |
9babb374 | 4326 | if (zio->io_ready) { |
b128c09f | 4327 | ASSERT(IO_IS_ALLOCATING(zio)); |
03c6040b GW |
4328 | ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || |
4329 | (zio->io_flags & ZIO_FLAG_NOPWRITE)); | |
b128c09f | 4330 | ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); |
34dc7c2f | 4331 | |
b128c09f BB |
4332 | zio->io_ready(zio); |
4333 | } | |
34dc7c2f | 4334 | |
b128c09f BB |
4335 | if (bp != NULL && bp != &zio->io_bp_copy) |
4336 | zio->io_bp_copy = *bp; | |
34dc7c2f | 4337 | |
3dfb57a3 | 4338 | if (zio->io_error != 0) { |
b128c09f | 4339 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; |
34dc7c2f | 4340 | |
3dfb57a3 DB |
4341 | if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { |
4342 | ASSERT(IO_IS_ALLOCATING(zio)); | |
4343 | ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); | |
cc99f275 DB |
4344 | ASSERT(zio->io_metaslab_class != NULL); |
4345 | ||
3dfb57a3 DB |
4346 | /* |
4347 | * We were unable to allocate anything, unreserve and | |
4348 | * issue the next I/O to allocate. | |
4349 | */ | |
4350 | metaslab_class_throttle_unreserve( | |
cc99f275 DB |
4351 | zio->io_metaslab_class, zio->io_prop.zp_copies, |
4352 | zio->io_allocator, zio); | |
492f64e9 | 4353 | zio_allocate_dispatch(zio->io_spa, zio->io_allocator); |
3dfb57a3 DB |
4354 | } |
4355 | } | |
4356 | ||
d164b209 BB |
4357 | mutex_enter(&zio->io_lock); |
4358 | zio->io_state[ZIO_WAIT_READY] = 1; | |
3dfb57a3 | 4359 | pio = zio_walk_parents(zio, &zl); |
d164b209 BB |
4360 | mutex_exit(&zio->io_lock); |
4361 | ||
4362 | /* | |
4363 | * As we notify zio's parents, new parents could be added. | |
4364 | * New parents go to the head of zio's io_parent_list, however, | |
4365 | * so we will (correctly) not notify them. The remainder of zio's | |
4366 | * io_parent_list, from 'pio_next' onward, cannot change because | |
4367 | * all parents must wait for us to be done before they can be done. | |
4368 | */ | |
4369 | for (; pio != NULL; pio = pio_next) { | |
3dfb57a3 | 4370 | pio_next = zio_walk_parents(zio, &zl); |
62840030 | 4371 | zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL); |
d164b209 | 4372 | } |
34dc7c2f | 4373 | |
428870ff BB |
4374 | if (zio->io_flags & ZIO_FLAG_NODATA) { |
4375 | if (BP_IS_GANG(bp)) { | |
4376 | zio->io_flags &= ~ZIO_FLAG_NODATA; | |
4377 | } else { | |
a6255b7f | 4378 | ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE); |
428870ff BB |
4379 | zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; |
4380 | } | |
4381 | } | |
4382 | ||
4383 | if (zio_injection_enabled && | |
4384 | zio->io_spa->spa_syncing_txg == zio->io_txg) | |
4385 | zio_handle_ignored_writes(zio); | |
4386 | ||
62840030 | 4387 | return (zio); |
34dc7c2f BB |
4388 | } |
4389 | ||
3dfb57a3 DB |
4390 | /* |
4391 | * Update the allocation throttle accounting. | |
4392 | */ | |
4393 | static void | |
4394 | zio_dva_throttle_done(zio_t *zio) | |
4395 | { | |
2a8ba608 | 4396 | zio_t *lio __maybe_unused = zio->io_logical; |
3dfb57a3 DB |
4397 | zio_t *pio = zio_unique_parent(zio); |
4398 | vdev_t *vd = zio->io_vd; | |
4399 | int flags = METASLAB_ASYNC_ALLOC; | |
4400 | ||
4401 | ASSERT3P(zio->io_bp, !=, NULL); | |
4402 | ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE); | |
4403 | ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE); | |
4404 | ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV); | |
4405 | ASSERT(vd != NULL); | |
4406 | ASSERT3P(vd, ==, vd->vdev_top); | |
21df134f SB |
4407 | ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY)); |
4408 | ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); | |
3dfb57a3 DB |
4409 | ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING); |
4410 | ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE)); | |
4411 | ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA)); | |
4412 | ||
4413 | /* | |
4414 | * Parents of gang children can have two flavors -- ones that | |
4415 | * allocated the gang header (will have ZIO_FLAG_IO_REWRITE set) | |
4416 | * and ones that allocated the constituent blocks. The allocation | |
4417 | * throttle needs to know the allocating parent zio so we must find | |
4418 | * it here. | |
4419 | */ | |
4420 | if (pio->io_child_type == ZIO_CHILD_GANG) { | |
4421 | /* | |
4422 | * If our parent is a rewrite gang child then our grandparent | |
4423 | * would have been the one that performed the allocation. | |
4424 | */ | |
4425 | if (pio->io_flags & ZIO_FLAG_IO_REWRITE) | |
4426 | pio = zio_unique_parent(pio); | |
4427 | flags |= METASLAB_GANG_CHILD; | |
4428 | } | |
4429 | ||
4430 | ASSERT(IO_IS_ALLOCATING(pio)); | |
4431 | ASSERT3P(zio, !=, zio->io_logical); | |
4432 | ASSERT(zio->io_logical != NULL); | |
4433 | ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR)); | |
4434 | ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE); | |
cc99f275 | 4435 | ASSERT(zio->io_metaslab_class != NULL); |
3dfb57a3 DB |
4436 | |
4437 | mutex_enter(&pio->io_lock); | |
492f64e9 PD |
4438 | metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags, |
4439 | pio->io_allocator, B_TRUE); | |
3dfb57a3 DB |
4440 | mutex_exit(&pio->io_lock); |
4441 | ||
cc99f275 DB |
4442 | metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1, |
4443 | pio->io_allocator, pio); | |
3dfb57a3 DB |
4444 | |
4445 | /* | |
4446 | * Call into the pipeline to see if there is more work that | |
4447 | * needs to be done. If there is work to be done it will be | |
4448 | * dispatched to another taskq thread. | |
4449 | */ | |
492f64e9 | 4450 | zio_allocate_dispatch(zio->io_spa, pio->io_allocator); |
3dfb57a3 DB |
4451 | } |
4452 | ||
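The parent-walk performed above (skipping past a rewrite gang-header parent to reach the zio that actually reserved the allocation) can be read in isolation as the following sketch; the helper name is invented for this note and the real code also sets METASLAB_GANG_CHILD in that branch:

static zio_t *
zio_allocating_parent_sketch(zio_t *zio)
{
	zio_t *pio = zio_unique_parent(zio);

	/* A rewrite gang-header parent did not do the allocation; go up one. */
	if (pio->io_child_type == ZIO_CHILD_GANG &&
	    (pio->io_flags & ZIO_FLAG_IO_REWRITE))
		pio = zio_unique_parent(pio);

	return (pio);
}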
62840030 | 4453 | static zio_t * |
b128c09f | 4454 | zio_done(zio_t *zio) |
34dc7c2f | 4455 | { |
3dfb57a3 DB |
4456 | /* |
4457 | * Always attempt to keep stack usage minimal here since | |
d611989f | 4458 | * we can be called recursively up to 19 levels deep. |
3dfb57a3 | 4459 | */ |
84c07ada | 4460 | const uint64_t psize = zio->io_size; |
d164b209 | 4461 | zio_t *pio, *pio_next; |
3dfb57a3 | 4462 | zio_link_t *zl = NULL; |
34dc7c2f | 4463 | |
b128c09f | 4464 | /* |
9babb374 | 4465 | * If our children haven't all completed, |
b128c09f BB |
4466 | * wait for them and then repeat this pipeline stage. |
4467 | */ | |
ddc751d5 | 4468 | if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) { |
62840030 | 4469 | return (NULL); |
ddc751d5 | 4470 | } |
34dc7c2f | 4471 | |
3dfb57a3 DB |
4472 | /* |
4473 | * If the allocation throttle is enabled, then update the accounting. | |
4474 | * We only track child I/Os that are part of an allocating async | |
4475 | * write. We must do this since the allocation is performed | |
4476 | * by the logical I/O but the actual write is done by child I/Os. | |
4477 | */ | |
4478 | if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING && | |
4479 | zio->io_child_type == ZIO_CHILD_VDEV) { | |
cc99f275 DB |
4480 | ASSERT(zio->io_metaslab_class != NULL); |
4481 | ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled); | |
3dfb57a3 DB |
4482 | zio_dva_throttle_done(zio); |
4483 | } | |
4484 | ||
4485 | /* | |
4486 | * If the allocation throttle is enabled, verify that | |
4487 | * we have decremented the refcounts for every I/O that was throttled. | |
4488 | */ | |
4489 | if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) { | |
4490 | ASSERT(zio->io_type == ZIO_TYPE_WRITE); | |
4491 | ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); | |
4492 | ASSERT(zio->io_bp != NULL); | |
cc99f275 | 4493 | |
492f64e9 PD |
4494 | metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio, |
4495 | zio->io_allocator); | |
f8020c93 AM |
4496 | VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class-> |
4497 | mc_allocator[zio->io_allocator].mca_alloc_slots, zio)); | |
3dfb57a3 DB |
4498 | } |
4499 | ||
4500 | ||
1c27024e DB |
4501 | for (int c = 0; c < ZIO_CHILD_TYPES; c++) |
4502 | for (int w = 0; w < ZIO_WAIT_TYPES; w++) | |
b128c09f BB |
4503 | ASSERT(zio->io_children[c][w] == 0); |
4504 | ||
9b67f605 | 4505 | if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) { |
c776b317 BB |
4506 | ASSERT(zio->io_bp->blk_pad[0] == 0); |
4507 | ASSERT(zio->io_bp->blk_pad[1] == 0); | |
d1d7e268 MK |
4508 | ASSERT(bcmp(zio->io_bp, &zio->io_bp_copy, |
4509 | sizeof (blkptr_t)) == 0 || | |
c776b317 BB |
4510 | (zio->io_bp == zio_unique_parent(zio)->io_bp)); |
4511 | if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) && | |
428870ff | 4512 | zio->io_bp_override == NULL && |
b128c09f | 4513 | !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { |
d1d7e268 MK |
4514 | ASSERT3U(zio->io_prop.zp_copies, <=, |
4515 | BP_GET_NDVAS(zio->io_bp)); | |
c776b317 | 4516 | ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 || |
d1d7e268 MK |
4517 | (BP_COUNT_GANG(zio->io_bp) == |
4518 | BP_GET_NDVAS(zio->io_bp))); | |
b128c09f | 4519 | } |
03c6040b GW |
4520 | if (zio->io_flags & ZIO_FLAG_NOPWRITE) |
4521 | VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig)); | |
b128c09f BB |
4522 | } |
4523 | ||
4524 | /* | |
428870ff | 4525 | * If there were child vdev/gang/ddt errors, they apply to us now. |
b128c09f BB |
4526 | */ |
4527 | zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); | |
4528 | zio_inherit_child_errors(zio, ZIO_CHILD_GANG); | |
428870ff BB |
4529 | zio_inherit_child_errors(zio, ZIO_CHILD_DDT); |
4530 | ||
4531 | /* | |
4532 | * If the I/O on the transformed data was successful, generate any | |
4533 | * checksum reports now while we still have the transformed data. | |
4534 | */ | |
4535 | if (zio->io_error == 0) { | |
4536 | while (zio->io_cksum_report != NULL) { | |
4537 | zio_cksum_report_t *zcr = zio->io_cksum_report; | |
4538 | uint64_t align = zcr->zcr_align; | |
a6255b7f | 4539 | uint64_t asize = P2ROUNDUP(psize, align); |
a6255b7f DQ |
4540 | abd_t *adata = zio->io_abd; |
4541 | ||
4542 | if (asize != psize) { | |
84c07ada | 4543 | adata = abd_alloc(asize, B_TRUE); |
a6255b7f DQ |
4544 | abd_copy(adata, zio->io_abd, psize); |
4545 | abd_zero_off(adata, psize, asize - psize); | |
428870ff BB |
4546 | } |
4547 | ||
4548 | zio->io_cksum_report = zcr->zcr_next; | |
4549 | zcr->zcr_next = NULL; | |
84c07ada | 4550 | zcr->zcr_finish(zcr, adata); |
428870ff BB |
4551 | zfs_ereport_free_checksum(zcr); |
4552 | ||
a6255b7f DQ |
4553 | if (asize != psize) |
4554 | abd_free(adata); | |
428870ff BB |
4555 | } |
4556 | } | |
b128c09f BB |
4557 | |
4558 | zio_pop_transforms(zio); /* note: may set zio->io_error */ | |
4559 | ||
a6255b7f | 4560 | vdev_stat_update(zio, psize); |
b128c09f | 4561 | |
a69052be | 4562 | /* |
cc92e9d0 | 4563 | * If this I/O is attached to a particular vdev and is slow, taking longer |
72f53c56 MJ |
4564 | * than zio_slow_io_ms (30 seconds by default) to complete, post an error describing the I/O delay. |
4565 | * We ignore these errors if the device is currently unavailable. | |
a69052be | 4566 | */ |
ad796b8a TH |
4567 | if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) { |
4568 | if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) { | |
4569 | /* | |
4570 | * We want to only increment our slow IO counters if | |
4571 | * the IO is valid (i.e. not if the drive is removed). | |
4572 | * | |
4573 | * zfs_ereport_post() will also do these checks, but | |
4574 | * it can also ratelimit and have other failures, so we | |
4575 | * need to increment the slow_io counters independent | |
4576 | * of it. | |
4577 | */ | |
4578 | if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY, | |
4579 | zio->io_spa, zio->io_vd, zio)) { | |
4580 | mutex_enter(&zio->io_vd->vdev_stat_lock); | |
4581 | zio->io_vd->vdev_stat.vs_slow_ios++; | |
4582 | mutex_exit(&zio->io_vd->vdev_stat_lock); | |
4583 | ||
1144586b | 4584 | (void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY, |
ad796b8a | 4585 | zio->io_spa, zio->io_vd, &zio->io_bookmark, |
4f072827 | 4586 | zio, 0); |
ad796b8a TH |
4587 | } |
4588 | } | |
72f53c56 | 4589 | } |
a69052be | 4590 | |
b128c09f BB |
4591 | if (zio->io_error) { |
4592 | /* | |
4593 | * If this I/O is attached to a particular vdev, | |
4594 | * generate an error message describing the I/O failure | |
4595 | * at the block level. We ignore these errors if the | |
4596 | * device is currently unavailable. | |
4597 | */ | |
c776b317 | 4598 | if (zio->io_error != ECKSUM && zio->io_vd != NULL && |
2bbec1c9 | 4599 | !vdev_is_dead(zio->io_vd)) { |
4f072827 DB |
4600 | int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO, |
4601 | zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0); | |
4602 | if (ret != EALREADY) { | |
4603 | mutex_enter(&zio->io_vd->vdev_stat_lock); | |
4604 | if (zio->io_type == ZIO_TYPE_READ) | |
4605 | zio->io_vd->vdev_stat.vs_read_errors++; | |
4606 | else if (zio->io_type == ZIO_TYPE_WRITE) | |
4607 | zio->io_vd->vdev_stat.vs_write_errors++; | |
4608 | mutex_exit(&zio->io_vd->vdev_stat_lock); | |
2bbec1c9 | 4609 | } |
2bbec1c9 | 4610 | } |
34dc7c2f | 4611 | |
428870ff BB |
4612 | if ((zio->io_error == EIO || !(zio->io_flags & |
4613 | (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && | |
c776b317 | 4614 | zio == zio->io_logical) { |
b128c09f BB |
4615 | /* |
4616 | * For logical I/O requests, tell the SPA to log the | |
4617 | * error and generate a logical data ereport. | |
4618 | */ | |
b5256303 | 4619 | spa_log_error(zio->io_spa, &zio->io_bookmark); |
1144586b | 4620 | (void) zfs_ereport_post(FM_EREPORT_ZFS_DATA, |
4f072827 | 4621 | zio->io_spa, NULL, &zio->io_bookmark, zio, 0); |
b128c09f BB |
4622 | } |
4623 | } | |
34dc7c2f | 4624 | |
c776b317 | 4625 | if (zio->io_error && zio == zio->io_logical) { |
b128c09f BB |
4626 | /* |
4627 | * Determine whether zio should be reexecuted. This will | |
4628 | * propagate all the way to the root via zio_notify_parent(). | |
4629 | */ | |
c776b317 | 4630 | ASSERT(zio->io_vd == NULL && zio->io_bp != NULL); |
428870ff | 4631 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); |
b128c09f | 4632 | |
428870ff BB |
4633 | if (IO_IS_ALLOCATING(zio) && |
4634 | !(zio->io_flags & ZIO_FLAG_CANFAIL)) { | |
b128c09f BB |
4635 | if (zio->io_error != ENOSPC) |
4636 | zio->io_reexecute |= ZIO_REEXECUTE_NOW; | |
4637 | else | |
4638 | zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; | |
428870ff | 4639 | } |
b128c09f BB |
4640 | |
4641 | if ((zio->io_type == ZIO_TYPE_READ || | |
4642 | zio->io_type == ZIO_TYPE_FREE) && | |
572e2857 | 4643 | !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && |
b128c09f | 4644 | zio->io_error == ENXIO && |
c776b317 BB |
4645 | spa_load_state(zio->io_spa) == SPA_LOAD_NONE && |
4646 | spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE) | |
b128c09f BB |
4647 | zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; |
4648 | ||
4649 | if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) | |
4650 | zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; | |
428870ff BB |
4651 | |
4652 | /* | |
4653 | * Here is a possibly good place to attempt to do | |
4654 | * either combinatorial reconstruction or error correction | |
4655 | * based on checksums. It also might be a good place | |
4656 | * to send out preliminary ereports before we suspend | |
4657 | * processing. | |
4658 | */ | |
34dc7c2f BB |
4659 | } |
4660 | ||
4661 | /* | |
b128c09f BB |
4662 | * If there were logical child errors, they apply to us now. |
4663 | * We defer this until now to avoid conflating logical child | |
4664 | * errors with errors that happened to the zio itself when | |
4665 | * updating vdev stats and reporting FMA events above. | |
34dc7c2f | 4666 | */ |
b128c09f | 4667 | zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); |
34dc7c2f | 4668 | |
428870ff BB |
4669 | if ((zio->io_error || zio->io_reexecute) && |
4670 | IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && | |
03c6040b | 4671 | !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) |
c776b317 | 4672 | zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp); |
9babb374 BB |
4673 | |
4674 | zio_gang_tree_free(&zio->io_gang_tree); | |
4675 | ||
4676 | /* | |
4677 | * Godfather I/Os should never suspend. | |
4678 | */ | |
4679 | if ((zio->io_flags & ZIO_FLAG_GODFATHER) && | |
4680 | (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) | |
a32494d2 | 4681 | zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND; |
9babb374 | 4682 | |
b128c09f BB |
4683 | if (zio->io_reexecute) { |
4684 | /* | |
4685 | * This is a logical I/O that wants to reexecute. | |
4686 | * | |
4687 | * Reexecute is top-down. When an i/o fails, if it's not | |
4688 | * the root, it simply notifies its parent and sticks around. | |
4689 | * The parent, seeing that it still has children in zio_done(), | |
4690 | * does the same. This percolates all the way up to the root. | |
4691 | * The root i/o will reexecute or suspend the entire tree. | |
4692 | * | |
4693 | * This approach ensures that zio_reexecute() honors | |
4694 | * all the original i/o dependency relationships, e.g. | |
4695 | * parents not executing until children are ready. | |
4696 | */ | |
4697 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
34dc7c2f | 4698 | |
9babb374 | 4699 | zio->io_gang_leader = NULL; |
b128c09f | 4700 | |
d164b209 BB |
4701 | mutex_enter(&zio->io_lock); |
4702 | zio->io_state[ZIO_WAIT_DONE] = 1; | |
4703 | mutex_exit(&zio->io_lock); | |
4704 | ||
9babb374 BB |
4705 | /* |
4706 | * "The Godfather" I/O monitors its children but is | |
4707 | * not a true parent to them. It will track them through | |
4708 | * the pipeline but severs its ties whenever they get into | |
4709 | * trouble (e.g. suspended). This allows "The Godfather" | |
4710 | * I/O to return status without blocking. | |
4711 | */ | |
3dfb57a3 DB |
4712 | zl = NULL; |
4713 | for (pio = zio_walk_parents(zio, &zl); pio != NULL; | |
4714 | pio = pio_next) { | |
4715 | zio_link_t *remove_zl = zl; | |
4716 | pio_next = zio_walk_parents(zio, &zl); | |
9babb374 BB |
4717 | |
4718 | if ((pio->io_flags & ZIO_FLAG_GODFATHER) && | |
4719 | (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { | |
3dfb57a3 | 4720 | zio_remove_child(pio, zio, remove_zl); |
62840030 MA |
4721 | /* |
4722 | * This is a rare code path, so we don't | |
4723 | * bother with "next_to_execute". | |
4724 | */ | |
4725 | zio_notify_parent(pio, zio, ZIO_WAIT_DONE, | |
4726 | NULL); | |
9babb374 BB |
4727 | } |
4728 | } | |
4729 | ||
d164b209 | 4730 | if ((pio = zio_unique_parent(zio)) != NULL) { |
b128c09f BB |
4731 | /* |
4732 | * We're not a root i/o, so there's nothing to do | |
4733 | * but notify our parent. Don't propagate errors | |
4734 | * upward since we haven't permanently failed yet. | |
4735 | */ | |
9babb374 | 4736 | ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); |
b128c09f | 4737 | zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; |
62840030 MA |
4738 | /* |
4739 | * This is a rare code path, so we don't bother with | |
4740 | * "next_to_execute". | |
4741 | */ | |
4742 | zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL); | |
b128c09f BB |
4743 | } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { |
4744 | /* | |
4745 | * We'd fail again if we reexecuted now, so suspend | |
4746 | * until conditions improve (e.g. device comes online). | |
4747 | */ | |
cec3a0a1 | 4748 | zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR); |
b128c09f BB |
4749 | } else { |
4750 | /* | |
4751 | * Reexecution is potentially a huge amount of work. | |
4752 | * Hand it off to the otherwise-unused claim taskq. | |
4753 | */ | |
a38718a6 | 4754 | ASSERT(taskq_empty_ent(&zio->io_tqent)); |
7ef5e54e AL |
4755 | spa_taskq_dispatch_ent(zio->io_spa, |
4756 | ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE, | |
a38718a6 GA |
4757 | (task_func_t *)zio_reexecute, zio, 0, |
4758 | &zio->io_tqent); | |
b128c09f | 4759 | } |
62840030 | 4760 | return (NULL); |
34dc7c2f BB |
4761 | } |
4762 | ||
428870ff | 4763 | ASSERT(zio->io_child_count == 0); |
b128c09f BB |
4764 | ASSERT(zio->io_reexecute == 0); |
4765 | ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); | |
34dc7c2f | 4766 | |
428870ff BB |
4767 | /* |
4768 | * Report any checksum errors, since the I/O is complete. | |
4769 | */ | |
4770 | while (zio->io_cksum_report != NULL) { | |
4771 | zio_cksum_report_t *zcr = zio->io_cksum_report; | |
4772 | zio->io_cksum_report = zcr->zcr_next; | |
4773 | zcr->zcr_next = NULL; | |
4774 | zcr->zcr_finish(zcr, NULL); | |
4775 | zfs_ereport_free_checksum(zcr); | |
4776 | } | |
4777 | ||
920dd524 | 4778 | if (zio->io_flags & ZIO_FLAG_FASTWRITE && zio->io_bp && |
9b67f605 MA |
4779 | !BP_IS_HOLE(zio->io_bp) && !BP_IS_EMBEDDED(zio->io_bp) && |
4780 | !(zio->io_flags & ZIO_FLAG_NOPWRITE)) { | |
920dd524 ED |
4781 | metaslab_fastwrite_unmark(zio->io_spa, zio->io_bp); |
4782 | } | |
4783 | ||
d164b209 BB |
4784 | /* |
4785 | * It is the responsibility of the done callback to ensure that this | |
4786 | * particular zio is no longer discoverable for adoption, and as | |
4787 | * such, cannot acquire any new parents. | |
4788 | */ | |
b128c09f BB |
4789 | if (zio->io_done) |
4790 | zio->io_done(zio); | |
34dc7c2f | 4791 | |
d164b209 BB |
4792 | mutex_enter(&zio->io_lock); |
4793 | zio->io_state[ZIO_WAIT_DONE] = 1; | |
4794 | mutex_exit(&zio->io_lock); | |
34dc7c2f | 4795 | |
62840030 MA |
4796 | /* |
4797 | * We are done executing this zio. We may want to execute a parent | |
4798 | * next. See the comment in zio_notify_parent(). | |
4799 | */ | |
4800 | zio_t *next_to_execute = NULL; | |
3dfb57a3 DB |
4801 | zl = NULL; |
4802 | for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) { | |
4803 | zio_link_t *remove_zl = zl; | |
4804 | pio_next = zio_walk_parents(zio, &zl); | |
4805 | zio_remove_child(pio, zio, remove_zl); | |
62840030 | 4806 | zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute); |
b128c09f | 4807 | } |
34dc7c2f | 4808 | |
b128c09f BB |
4809 | if (zio->io_waiter != NULL) { |
4810 | mutex_enter(&zio->io_lock); | |
4811 | zio->io_executor = NULL; | |
4812 | cv_broadcast(&zio->io_cv); | |
4813 | mutex_exit(&zio->io_lock); | |
4814 | } else { | |
4815 | zio_destroy(zio); | |
4816 | } | |
34dc7c2f | 4817 | |
62840030 | 4818 | return (next_to_execute); |
34dc7c2f BB |
4819 | } |
4820 | ||
4821 | /* | |
b128c09f BB |
4822 | * ========================================================================== |
4823 | * I/O pipeline definition | |
4824 | * ========================================================================== | |
34dc7c2f | 4825 | */ |
428870ff | 4826 | static zio_pipe_stage_t *zio_pipeline[] = { |
b128c09f | 4827 | NULL, |
b128c09f | 4828 | zio_read_bp_init, |
3dfb57a3 | 4829 | zio_write_bp_init, |
428870ff BB |
4830 | zio_free_bp_init, |
4831 | zio_issue_async, | |
3dfb57a3 | 4832 | zio_write_compress, |
b5256303 | 4833 | zio_encrypt, |
b128c09f | 4834 | zio_checksum_generate, |
03c6040b | 4835 | zio_nop_write, |
428870ff BB |
4836 | zio_ddt_read_start, |
4837 | zio_ddt_read_done, | |
4838 | zio_ddt_write, | |
4839 | zio_ddt_free, | |
b128c09f BB |
4840 | zio_gang_assemble, |
4841 | zio_gang_issue, | |
3dfb57a3 | 4842 | zio_dva_throttle, |
b128c09f BB |
4843 | zio_dva_allocate, |
4844 | zio_dva_free, | |
4845 | zio_dva_claim, | |
4846 | zio_ready, | |
4847 | zio_vdev_io_start, | |
4848 | zio_vdev_io_done, | |
4849 | zio_vdev_io_assess, | |
4850 | zio_checksum_verify, | |
4851 | zio_done | |
4852 | }; | |
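Each entry in zio_pipeline[] corresponds to one bit of enum zio_stage, so a stage bit maps to its handler by bit position. A hedged sketch of that lookup (the pipeline execution code performs the equivalent dispatch; this helper exists only to make the table indexing concrete):

static zio_t *
zio_dispatch_stage_sketch(zio_t *zio, enum zio_stage stage)
{
	/* Stages are one-hot bits; the bit position selects the table entry. */
	ASSERT(ISP2(stage));
	ASSERT3U(stage, >, ZIO_STAGE_OPEN);	/* slot 0 (NULL) is never run */

	return (zio_pipeline[highbit64(stage) - 1](zio));
}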
c28b2279 | 4853 | |
9ae529ec | 4854 | |
9ae529ec | 4855 | |
9ae529ec | 4856 | |
fcff0f35 PD |
4857 | /* |
4858 | * Compare two zbookmark_phys_t's to see which we would reach first in a | |
4859 | * pre-order traversal of the object tree. | |
4860 | * | |
4861 | * This is simple in every case aside from the meta-dnode object. For all other | |
4862 | * objects, we traverse them in order (object 1 before object 2, and so on). | |
4863 | * However, all of these objects are traversed while traversing object 0, since | |
4864 | * the data it points to is the list of objects. Thus, we need to convert to a | |
4865 | * canonical representation so we can compare meta-dnode bookmarks to | |
4866 | * non-meta-dnode bookmarks. | |
4867 | * | |
4868 | * We do this by calculating "equivalents" for each field of the zbookmark. | |
4869 | * zbookmarks outside of the meta-dnode use their own object and level, and | |
4870 | * calculate the level 0 equivalent (the first L0 blkid that is contained in the | |
4871 | * blocks this bookmark refers to) by multiplying their blkid by their span | |
4872 | * (the number of L0 blocks contained within one block at their level). | |
4873 | * zbookmarks inside the meta-dnode calculate their object equivalent | |
4874 | * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use | |
4875 | * level + 1<<31 (any value larger than a level could ever be) for their level. | |
4876 | * This causes them to always compare before a bookmark in their object | |
4877 | * equivalent, compare appropriately to bookmarks in other objects, and to | |
4878 | * compare appropriately to other bookmarks in the meta-dnode. | |
4879 | */ | |
4880 | int | |
4881 | zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2, | |
4882 | const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2) | |
4883 | { | |
4884 | /* | |
4885 | * These variables represent the "equivalent" values for the zbookmark, | |
4886 | * after converting zbookmarks inside the meta dnode to their | |
4887 | * normal-object equivalents. | |
4888 | */ | |
4889 | uint64_t zb1obj, zb2obj; | |
4890 | uint64_t zb1L0, zb2L0; | |
4891 | uint64_t zb1level, zb2level; | |
4892 | ||
4893 | if (zb1->zb_object == zb2->zb_object && | |
4894 | zb1->zb_level == zb2->zb_level && | |
4895 | zb1->zb_blkid == zb2->zb_blkid) | |
4896 | return (0); | |
9ae529ec | 4897 | |
30af21b0 PD |
4898 | IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT); |
4899 | IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT); | |
4900 | ||
fcff0f35 PD |
4901 | /* |
4902 | * BP_SPANB calculates the span in blocks. | |
4903 | */ | |
4904 | zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level); | |
4905 | zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level); | |
9ae529ec CS |
4906 | |
4907 | if (zb1->zb_object == DMU_META_DNODE_OBJECT) { | |
fcff0f35 PD |
4908 | zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); |
4909 | zb1L0 = 0; | |
4910 | zb1level = zb1->zb_level + COMPARE_META_LEVEL; | |
4911 | } else { | |
4912 | zb1obj = zb1->zb_object; | |
4913 | zb1level = zb1->zb_level; | |
9ae529ec CS |
4914 | } |
4915 | ||
fcff0f35 PD |
4916 | if (zb2->zb_object == DMU_META_DNODE_OBJECT) { |
4917 | zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); | |
4918 | zb2L0 = 0; | |
4919 | zb2level = zb2->zb_level + COMPARE_META_LEVEL; | |
4920 | } else { | |
4921 | zb2obj = zb2->zb_object; | |
4922 | zb2level = zb2->zb_level; | |
4923 | } | |
4924 | ||
4925 | /* Now that we have a canonical representation, do the comparison. */ | |
4926 | if (zb1obj != zb2obj) | |
4927 | return (zb1obj < zb2obj ? -1 : 1); | |
4928 | else if (zb1L0 != zb2L0) | |
4929 | return (zb1L0 < zb2L0 ? -1 : 1); | |
4930 | else if (zb1level != zb2level) | |
4931 | return (zb1level > zb2level ? -1 : 1); | |
4932 | /* | |
4933 | * This can (theoretically) happen if the bookmarks have the same object | |
4934 | * and level, but different blkids, if the block sizes are not the same. | |
4935 | * There is presently no way to change the indirect block sizes. |
4936 | */ | |
4937 | return (0); | |
4938 | } | |
4939 | ||
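A worked example of the canonicalization, using hypothetical geometry (16K dnode-file blocks, i.e. dn_datablkszsec == 32, and 128K indirect blocks, ibs == 17): meta-dnode L0 block 3 holds dnodes 96-127 and therefore sorts before a bookmark in object 100, while block 4 (dnodes 128-159) sorts after it. Expressed as assertions, for illustration only:

static void
zbookmark_compare_example(void)
{
	zbookmark_phys_t meta = {
		.zb_objset = 5, .zb_object = DMU_META_DNODE_OBJECT,
		.zb_level = 0, .zb_blkid = 3
	};
	zbookmark_phys_t plain = {
		.zb_objset = 5, .zb_object = 100,
		.zb_level = 0, .zb_blkid = 0
	};

	/* 3 * 32 == 96 < 100: the meta-dnode block is visited first. */
	ASSERT3S(zbookmark_compare(32, 17, 32, 17, &meta, &plain), ==, -1);

	/* 4 * 32 == 128 > 100: now the plain-object bookmark comes first. */
	meta.zb_blkid = 4;
	ASSERT3S(zbookmark_compare(32, 17, 32, 17, &meta, &plain), ==, 1);
}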
4940 | /* | |
4941 | * This function checks the following: given that last_block is the place that | |
4942 | * our traversal stopped last time, does that guarantee that we've visited | |
4943 | * every node under subtree_root? Therefore, we can't just use the raw output | |
4944 | * of zbookmark_compare. We have to pass in a modified version of | |
4945 | * subtree_root; by incrementing the block id, and then checking whether | |
4946 | * last_block is before or equal to that, we can tell whether or not having | |
4947 | * visited last_block implies that all of subtree_root's children have been | |
4948 | * visited. | |
4949 | */ | |
4950 | boolean_t | |
4951 | zbookmark_subtree_completed(const dnode_phys_t *dnp, | |
4952 | const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block) | |
4953 | { | |
4954 | zbookmark_phys_t mod_zb = *subtree_root; | |
4955 | mod_zb.zb_blkid++; | |
4956 | ASSERT(last_block->zb_level == 0); | |
4957 | ||
4958 | /* The objset_phys_t isn't before anything. */ | |
4959 | if (dnp == NULL) | |
9ae529ec | 4960 | return (B_FALSE); |
fcff0f35 PD |
4961 | |
4962 | /* | |
4963 | * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the | |
4964 | * data block size in sectors, because that variable is only used if | |
4965 | * the bookmark refers to a block in the meta-dnode. Since we don't | |
4966 | * know without examining it what object it refers to, and there's no | |
4967 | * harm in passing in this value in other cases, we always pass it in. | |
4968 | * | |
4969 | * We pass in 0 for the indirect block size shift because zb2 must be | |
4970 | * level 0. The indirect block size is only used to calculate the span | |
4971 | * of the bookmark, but since the bookmark must be level 0, the span is | |
4972 | * always 1, so the math works out. | |
4973 | * | |
4974 | * If you make changes to how the zbookmark_compare code works, be sure | |
4975 | * to make sure that this code still works afterwards. | |
4976 | */ | |
4977 | return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift, | |
4978 | 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb, | |
4979 | last_block) <= 0); | |
9ae529ec CS |
4980 | } |
4981 | ||
c28b2279 | 4982 | EXPORT_SYMBOL(zio_type_name); |
81971b13 BB |
4983 | EXPORT_SYMBOL(zio_buf_alloc); |
4984 | EXPORT_SYMBOL(zio_data_buf_alloc); | |
4985 | EXPORT_SYMBOL(zio_buf_free); | |
4986 | EXPORT_SYMBOL(zio_data_buf_free); | |
c28b2279 | 4987 | |
03fdcb9a MM |
4988 | /* BEGIN CSTYLED */ |
4989 | ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW, | |
ad796b8a | 4990 | "Max I/O completion time (milliseconds) before marking it as slow"); |
c409e464 | 4991 | |
03fdcb9a MM |
4992 | ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW, |
4993 | "Prioritize requeued I/O"); | |
29dee3ee | 4994 | |
03fdcb9a | 4995 | ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, INT, ZMOD_RW, |
d1d7e268 | 4996 | "Defer frees starting in this pass"); |
29dee3ee | 4997 | |
03fdcb9a | 4998 | ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, INT, ZMOD_RW, |
d1d7e268 | 4999 | "Don't compress starting in this pass"); |
29dee3ee | 5000 | |
03fdcb9a | 5001 | ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, INT, ZMOD_RW, |
d1d7e268 | 5002 | "Rewrite new bps starting in this pass"); |
3dfb57a3 | 5003 | |
03fdcb9a | 5004 | ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW, |
3dfb57a3 | 5005 | "Throttle block allocations in the ZIO pipeline"); |
638dd5f4 | 5006 | |
03fdcb9a | 5007 | ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW, |
638dd5f4 | 5008 | "Log all slow ZIOs, not just those with vdevs"); |
03fdcb9a | 5009 | /* END CSTYLED */ |