/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/time.h>

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
const char *zio_type_name[ZIO_TYPES] = {
	"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl"
};

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
int zio_delay_max = ZIO_DELAY_MAX;

#define	ZIO_PIPELINE_CONTINUE		0x100
#define	ZIO_PIPELINE_STOP		0x101

#define	BP_SPANB(indblkshift, level) \
	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define	COMPARE_META_LEVEL	0x80000000ul
/*
 * The following actions directly affect the spa's sync-to-convergence logic.
 * The values below define the sync pass when we start performing the action.
 * Care should be taken when changing these values as they directly impact
 * spa_sync() performance. Tuning these values may introduce subtle performance
 * pathologies and should only be done in the context of performance analysis.
 * These tunables will eventually be removed and replaced with #defines once
 * enough analysis has been done to determine optimal values.
 *
 * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
 * regular blocks are not deferred.
 */
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */

/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)

int zio_requeue_io_start_cut_in_line = 1;

#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif

static inline void __zio_execute(zio_t *zio);

void
zio_init(void)
{
	size_t c;
	vmem_t *data_alloc_arena = NULL;

	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For larger buffers, we want a cache
	 * for each quarter-power of 2.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;
		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;

#ifdef _ILP32
		/*
		 * Cache size limited to 1M on 32-bit platforms until ARC
		 * buffers no longer require virtual address space.
		 */
		if (size > zfs_max_recordsize)
			break;
#endif

		while (!ISP2(p2))
			p2 &= p2 - 1;

#ifndef _KERNEL
		/*
		 * If we are using watchpoints, put each buffer on its own page,
		 * to eliminate the performance overhead of trapping to the
		 * kernel when modifying a non-watched buffer that shares the
		 * page with a watched buffer.
		 */
		if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
			continue;
#endif
		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (IS_P2ALIGNED(size, p2 >> 2)) {
			align = MIN(p2 >> 2, PAGESIZE);
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, cflags);

			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL,
			    data_alloc_arena, cflags);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();

	lz4_init();
}

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
#ifdef _ILP32
		/*
		 * Cache size limited to 1M on 32-bit platforms until ARC
		 * buffers no longer require virtual address space.
		 */
		if (((c + 1) << SPA_MINBLOCKSHIFT) > zfs_max_recordsize)
			break;
#endif
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();

	lz4_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the amount
 * of ZFS data that shows up in a kernel crashdump.  (Thus reducing the amount
 * of kernel heap dumped to disk when the kernel panics)
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}

/*
 * Use zio_buf_alloc_flags when specific allocation flags are needed.  e.g.
 * passing KM_NOSLEEP when it is acceptable for an allocation to fail.
 */
void *
zio_buf_alloc_flags(size_t size, int flags)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], flags));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_data = zio->io_data;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}

static void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_data, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			zio_buf_free(zio->io_data, zt->zt_bufsize);

		zio->io_data = zt->zt_orig_data;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}

/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, void *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		bcopy(zio->io_data, data, size);
}

static void
zio_decompress(zio_t *zio, void *data, uint64_t size)
{
	if (zio->io_error == 0 &&
	    zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
	    zio->io_data, data, zio->io_size, size) != 0)
		zio->io_error = SET_ERROR(EIO);
}

/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */
/*
 * NOTE - Callers to zio_walk_parents() and zio_walk_children() must
 *        continue calling these functions until they return NULL.
 *        Otherwise, the next caller will pick up the list walk in
 *        some indeterminate state.  (The alternative would be for
 *        every caller to pass in a cookie to keep the state
 *        represented by io_walk_link, which gets annoying.)
 */
zio_t *
zio_walk_parents(zio_t *cio)
{
	zio_link_t *zl = cio->io_walk_link;
	list_t *pl = &cio->io_parent_list;

	zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl);
	cio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_child == cio);
	return (zl->zl_parent);
}

zio_t *
zio_walk_children(zio_t *pio)
{
	zio_link_t *zl = pio->io_walk_link;
	list_t *cl = &pio->io_child_list;

	zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl);
	pio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_parent == pio);
	return (zl->zl_child);
}

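/*
 * Return the single parent of this zio and verify that it has no others.
 */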
zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_t *pio = zio_walk_parents(cio);

	VERIFY(zio_walk_parents(cio) == NULL);
	return (pio);
}

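/*
 * Link a parent and child zio together and account for the child's
 * outstanding work in the parent's io_children[][] counters.
 */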
void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
	int w;

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT(cio->io_child_type <= pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	pio->io_child_count++;
	cio->io_parent_count++;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}

static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	pio->io_child_count--;
	cio->io_parent_count--;

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}

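/*
 * Returns B_TRUE if the zio must stall: it still has outstanding children
 * of the given type/wait class, so back the stage up by one and record the
 * counter to be re-checked when the last such child completes.
 */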
static boolean_t
zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
{
	uint64_t *countp = &zio->io_children[child][wait];
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	if (*countp != 0) {
		zio->io_stage >>= 1;
		zio->io_stall = countp;
		waiting = B_TRUE;
	}
	mutex_exit(&zio->io_lock);

	return (waiting);
}

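/*
 * Called when a child zio reaches the given wait point (ready or done):
 * propagate error and reexecute state to the parent, decrement the parent's
 * outstanding-child count, and restart the parent's pipeline if it was
 * stalled waiting on this counter.
 */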
__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);

	(*countp)--;

	if (*countp == 0 && pio->io_stall == countp) {
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		__zio_execute(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
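/*
 * Common constructor for all zio types: allocate and initialize the zio,
 * derive its child type from the vdev and flags, and link it to its parent
 * (if any) before it enters the pipeline at 'stage'.
 */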
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, zio_priority_t priority, enum zio_flag flags,
    vdev_t *vd, uint64_t offset, const zbookmark_phys_t *zb,
    enum zio_stage stage, enum zio_stage pipeline)
{
	zio_t *zio;

	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else if (flags & ZIO_FLAG_DDT_CHILD)
		zio->io_child_type = ZIO_CHILD_DDT;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = (blkptr_t *)bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE ||
		    zio->io_child_type == ZIO_CHILD_DDT)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_data = zio->io_data = data;
	zio->io_orig_size = zio->io_size = size;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	taskq_init_ent(&zio->io_tqent);

	return (zio);
}

static void
zio_destroy(zio_t *zio)
{
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}

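/*
 * A null zio performs no I/O of its own; it is used (directly, or via
 * zio_root()) as an interlock to group and wait for other zios.
 */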
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, enum zio_flag flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}

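/*
 * Sanity-check the fields and DVAs of a block pointer; any inconsistency
 * is reported through zfs_panic_recover().
 */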
void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{
	int i;

	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
		    bp, (longlong_t)BP_GET_TYPE(bp));
	}
	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
	}
	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
		    bp, (longlong_t)BP_GET_COMPRESS(bp));
	}
	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
		    bp, (longlong_t)BP_GET_LSIZE(bp));
	}
	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
		    bp, (longlong_t)BP_GET_PSIZE(bp));
	}

	if (BP_IS_EMBEDDED(bp)) {
		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
			    bp, (longlong_t)BPE_GET_ETYPE(bp));
		}
	}

	/*
	 * Pool-specific checks.
	 *
	 * Note: it would be nice to verify that the blk_birth and
	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
	 * that are in the log) to be arbitrarily large.
	 */
	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd;
		uint64_t offset, asize;
		if (vdevid >= spa->spa_root_vdev->vdev_children) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		vd = spa->spa_root_vdev->vdev_child[vdevid];
		if (vd == NULL) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_hole_ops) {
			zfs_panic_recover("blkptr at %p DVA %u has hole "
			    "VDEV %llu",
			    bp, i, (longlong_t)vdevid);
			continue;
		}
		if (vd->vdev_ops == &vdev_missing_ops) {
			/*
			 * "missing" vdevs are valid during import, but we
			 * don't have their detailed info (e.g. asize), so
			 * we can't perform any more checks on them.
			 */
			continue;
		}
		offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
		if (BP_IS_GANG(bp))
			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
		if (offset + asize > vd->vdev_asize) {
			zfs_panic_recover("blkptr at %p DVA %u has invalid "
			    "OFFSET %llu",
			    bp, i, (longlong_t)offset);
		}
	}
}

zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	zfs_blkptr_verify(spa, bp);

	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
	    data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, const zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *physdone, zio_done_func_t *done,
    void *private,
    zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    DMU_OT_IS_VALID(zp->zp_type) &&
	    zp->zp_level < 32 &&
	    zp->zp_copies > 0 &&
	    zp->zp_copies <= spa_max_replication(spa));

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
	    ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;
	zio->io_physdone = physdone;
	zio->io_prop = *zp;

	/*
	 * Data can be NULL if we are going to call zio_write_override() to
	 * provide the already-allocated BP.  But we may need the data to
	 * verify a dedup hit (if requested).  In this case, don't try to
	 * dedup (just take the already-allocated BP verbatim).
	 */
	if (data == NULL && zio->io_prop.zp_dedup_verify) {
		zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}

void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));

	/*
	 * We must reset the io_prop to match the values that existed
	 * when the bp was first written by dmu_sync() keeping in mind
	 * that nopwrite and dedup are mutually exclusive.
	 */
	zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
	zio->io_prop.zp_nopwrite = nopwrite;
	zio->io_prop.zp_copies = copies;
	zio->io_bp_override = bp;
}

void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{

	/*
	 * The check for EMBEDDED is a performance optimization.  We
	 * process the free here (by ignoring it) rather than
	 * putting it on the list and then processing it in zio_free_sync().
	 */
	if (BP_IS_EMBEDDED(bp))
		return;
	metaslab_check_free(spa, bp);

	/*
	 * Frees that are for the currently-syncing txg, are not going to be
	 * deferred, and which will not need to do a read (i.e. not GANG or
	 * DEDUP), can be processed immediately.  Otherwise, put them on the
	 * in-memory list for later processing.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
	    txg != spa->spa_syncing_txg ||
	    spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
		bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
	} else {
		VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
	}
}

zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    enum zio_flag flags)
{
	zio_t *zio;
	enum zio_stage stage = ZIO_FREE_PIPELINE;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(spa_syncing_txg(spa) == txg);
	ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	metaslab_check_free(spa, bp);
	arc_freed(spa, bp);

	/*
	 * GANG and DEDUP blocks can induce a read (for the gang block header,
	 * or the DDT), so issue them asynchronously so that this thread is
	 * not tied up.
	 */
	if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
		stage |= ZIO_STAGE_ISSUE_ASYNC;

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, stage);

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;

	dprintf_bp(bp, "claiming in txg %llu", txg);

	if (BP_IS_EMBEDDED(bp))
		return (zio_null(pio, spa, NULL, NULL, NULL, 0));

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 * If txg == 0 we just verify that the block is claimable.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT(txg == spa_first_txg(spa) || txg == 0);
	ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa));	/* zdb(1M) */

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, enum zio_flag flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
		    ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, flags));
	}

	return (zio);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd, offset,
	    NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd, offset,
	    NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_eck) {
		/*
		 * zec checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		void *wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}

/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, zio_priority_t priority,
    enum zio_flag flags, zio_done_func_t *done, void *private)
{
	enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	ASSERT(vd->vdev_parent ==
	    (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
	}

	if (vd->vdev_children == 0)
		offset += VDEV_LABEL_START_SIZE;

	flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE;

	/*
	 * If we've decided to do a repair, the write is not speculative --
	 * even if the original read was.
	 */
	if (flags & ZIO_FLAG_IO_REPAIR)
		flags &= ~ZIO_FLAG_SPECULATIVE;

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size,
	    done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START >> 1, pipeline);

	zio->io_physdone = pio->io_physdone;
	if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
		zio->io_logical->io_phys_children++;

	return (zio);
}

zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size,
    int type, zio_priority_t priority, enum zio_flag flags,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}

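/*
 * Issue an asynchronous write-cache flush (DKIOCFLUSHWRITECACHE ioctl) to
 * the given vdev and, via zio_ioctl()'s recursion, to its children.
 */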
void
zio_flush(zio_t *zio, vdev_t *vd)
{
	zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
	    NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}

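/*
 * Reduce the size of a not-yet-issued zio; raidz blocks are left unchanged
 * (see the comment below).
 */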
void
zio_shrink(zio_t *zio, uint64_t size)
{
	ASSERT(zio->io_executor == NULL);
	ASSERT(zio->io_orig_size == zio->io_size);
	ASSERT(size <= zio->io_size);

	/*
	 * We don't shrink for raidz because of problems with the
	 * reconstruction when reading back less than the block size.
	 * Note, BP_IS_RAIDZ() assumes no compression.
	 */
	ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
	if (!BP_IS_RAIDZ(zio->io_bp))
		zio->io_orig_size = zio->io_size = size;
}

/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */

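/*
 * Set up the read pipeline based on the block pointer: push a decompress
 * transform for compressed blocks, decode embedded-data BPs in place, and
 * divert dedup'd logical reads to the DDT read pipeline.
 */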
static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t psize =
		    BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(psize);

		zio_push_transform(zio, cbuf, psize, psize, zio_decompress);
	}

	if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
		decode_embedded_bp_compressed(bp, zio->io_data);
	} else {
		ASSERT(!BP_IS_EMBEDDED(bp));
	}

	if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
		zio->io_pipeline = ZIO_DDT_READ_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}

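/*
 * Set up the write pipeline: honor any override BP (dedup/nopwrite),
 * compress the data if requested, and fill in the block pointer fields,
 * selecting the rewrite, dedup, or normal write pipeline as appropriate.
 */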
static int
zio_write_bp_init(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_prop_t *zp = &zio->io_prop;
	enum zio_compress compress = zp->zp_compress;
	blkptr_t *bp = zio->io_bp;
	uint64_t lsize = zio->io_size;
	uint64_t psize = lsize;
	int pass = 1;

	/*
	 * If our children haven't all reached the ready stage,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (!IO_IS_ALLOCATING(zio))
		return (ZIO_PIPELINE_CONTINUE);

	ASSERT(zio->io_child_type != ZIO_CHILD_DDT);

	if (zio->io_bp_override) {
		ASSERT(bp->blk_birth != zio->io_txg);
		ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);

		*bp = *zio->io_bp_override;
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

		if (BP_IS_EMBEDDED(bp))
			return (ZIO_PIPELINE_CONTINUE);

		/*
		 * If we've been overridden and nopwrite is set then
		 * set the flag accordingly to indicate that a nopwrite
		 * has already occurred.
		 */
		if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
			ASSERT(!zp->zp_dedup);
			zio->io_flags |= ZIO_FLAG_NOPWRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}

		ASSERT(!zp->zp_nopwrite);

		if (BP_IS_HOLE(bp) || !zp->zp_dedup)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup ||
		    zp->zp_dedup_verify);

		if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) {
			BP_SET_DEDUP(bp, 1);
			zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
			return (ZIO_PIPELINE_CONTINUE);
		}
		zio->io_bp_override = NULL;
		BP_ZERO(bp);
	}

	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(spa);

		ASSERT(zio->io_txg == spa_syncing_txg(spa));
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(!BP_GET_DEDUP(bp));

		if (pass >= zfs_sync_pass_dont_compress)
			compress = ZIO_COMPRESS_OFF;

		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	if (compress != ZIO_COMPRESS_OFF) {
		void *cbuf = zio_buf_alloc(lsize);
		psize = zio_compress_data(compress, zio->io_data, cbuf, lsize);
		if (psize == 0 || psize == lsize) {
			compress = ZIO_COMPRESS_OFF;
			zio_buf_free(cbuf, lsize);
		} else if (!zp->zp_dedup && psize <= BPE_PAYLOAD_SIZE &&
		    zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
		    spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
			encode_embedded_bp_compressed(bp,
			    cbuf, compress, lsize, psize);
			BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
			BP_SET_TYPE(bp, zio->io_prop.zp_type);
			BP_SET_LEVEL(bp, zio->io_prop.zp_level);
			zio_buf_free(cbuf, lsize);
			bp->blk_birth = zio->io_txg;
			zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_EMBEDDED_DATA));
			return (ZIO_PIPELINE_CONTINUE);
		} else {
			/*
			 * Round up compressed size up to the ashift
			 * of the smallest-ashift device, and zero the tail.
			 * This ensures that the compressed size of the BP
			 * (and thus compressratio property) are correct,
			 * in that we charge for the padding used to fill out
			 * the last sector.
			 */
			size_t rounded;

			ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);

			rounded = (size_t)P2ROUNDUP(psize,
			    1ULL << spa->spa_min_ashift);
			if (rounded >= lsize) {
				compress = ZIO_COMPRESS_OFF;
				zio_buf_free(cbuf, lsize);
				psize = lsize;
			} else {
				bzero((char *)cbuf + psize, rounded - psize);
				psize = rounded;
				zio_push_transform(zio, cbuf,
				    psize, lsize, NULL);
			}
		}
	}

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to allocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
	    BP_GET_PSIZE(bp) == psize &&
	    pass >= zfs_sync_pass_rewrite) {
		enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
		ASSERT(psize != 0);
		zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
		zio->io_flags |= ZIO_FLAG_IO_REWRITE;
	} else {
		BP_ZERO(bp);
		zio->io_pipeline = ZIO_WRITE_PIPELINE;
	}

	if (psize == 0) {
		if (zio->io_bp_orig.blk_birth != 0 &&
		    spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, zp->zp_type);
			BP_SET_LEVEL(bp, zp->zp_level);
			BP_SET_BIRTH(bp, zio->io_txg, 0);
		}
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
	} else {
		ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_TYPE(bp, zp->zp_type);
		BP_SET_LEVEL(bp, zp->zp_level);
		BP_SET_PSIZE(bp, psize);
		BP_SET_COMPRESS(bp, compress);
		BP_SET_CHECKSUM(bp, zp->zp_checksum);
		BP_SET_DEDUP(bp, zp->zp_dedup);
		BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
		if (zp->zp_dedup) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
		}
		if (zp->zp_nopwrite) {
			ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
			ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
			zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}

static int
zio_free_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
		if (BP_GET_DEDUP(bp))
			zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */

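/*
 * Dispatch this zio to one of the spa's taskqs for its I/O type, optionally
 * cutting in line at the front of the queue.
 */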
static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
	spa_t *spa = zio->io_spa;
	zio_type_t t = zio->io_type;
	int flags = (cutinline ? TQ_FRONT : 0);

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	/*
	 * If this is a high priority I/O, then use the high priority taskq if
	 * available.
	 */
	if (zio->io_priority == ZIO_PRIORITY_NOW &&
	    spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
		q++;

	ASSERT3U(q, <, ZIO_TASKQ_TYPES);

	/*
	 * NB: We are assuming that the zio can only be dispatched
	 * to a single taskq at a time.  It would be a grievous error
	 * to dispatch the zio to another taskq at the same time.
	 */
	ASSERT(taskq_empty_ent(&zio->io_tqent));
	spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
	    flags, &zio->io_tqent);
}

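/*
 * Return B_TRUE if the zio's executor thread is a member of any of the
 * spa's taskqs of the given type 'q', for any I/O type.
 */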
b128c09f | 1360 | static boolean_t |
7ef5e54e | 1361 | zio_taskq_member(zio_t *zio, zio_taskq_type_t q) |
b128c09f BB |
1362 | { |
1363 | kthread_t *executor = zio->io_executor; | |
1364 | spa_t *spa = zio->io_spa; | |
d6320ddb | 1365 | zio_type_t t; |
34dc7c2f | 1366 | |
7ef5e54e AL |
1367 | for (t = 0; t < ZIO_TYPES; t++) { |
1368 | spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; | |
1369 | uint_t i; | |
1370 | for (i = 0; i < tqs->stqs_count; i++) { | |
1371 | if (taskq_member(tqs->stqs_taskq[i], executor)) | |
1372 | return (B_TRUE); | |
1373 | } | |
1374 | } | |
34dc7c2f | 1375 | |
b128c09f BB |
1376 | return (B_FALSE); |
1377 | } | |
34dc7c2f | 1378 | |
b128c09f BB |
1379 | static int |
1380 | zio_issue_async(zio_t *zio) | |
1381 | { | |
428870ff | 1382 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); |
b128c09f BB |
1383 | |
1384 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f BB |
1385 | } |
1386 | ||
b128c09f BB |
1387 | void |
1388 | zio_interrupt(zio_t *zio) | |
34dc7c2f | 1389 | { |
428870ff | 1390 | zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE); |
b128c09f | 1391 | } |
34dc7c2f | 1392 | |
b128c09f BB |
1393 | /* |
1394 | * Execute the I/O pipeline until one of the following occurs: | |
1395 | * (1) the I/O completes; (2) the pipeline stalls waiting for | |
1396 | * dependent child I/Os; (3) the I/O issues, so we're waiting | |
1397 | * for an I/O completion interrupt; (4) the I/O is delegated by | |
1398 | * vdev-level caching or aggregation; (5) the I/O is deferred | |
1399 | * due to vdev-level queueing; (6) the I/O is handed off to | |
1400 | * another thread. In all cases, the pipeline stops whenever | |
8e07b99b | 1401 | * there's no CPU work; it never burns a thread in cv_wait_io(). |
b128c09f BB |
1402 | * |
1403 | * There's no locking on io_stage because there's no legitimate way | |
1404 | * for multiple threads to be attempting to process the same I/O. | |
1405 | */ | |
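/*
 * Illustrative sketch (not part of the original source): __zio_execute()
 * below advances through the pipeline by treating io_pipeline as a bitmask
 * of enabled stages and left-shifting io_stage until it lands on an enabled
 * bit.  With hypothetical stage bits:
 *
 *	enum zio_stage pipeline = 0x0015;	// stages at bits 0, 2 and 4
 *	enum zio_stage stage    = 0x0004;	// last stage executed was bit 2
 *
 *	do {
 *		stage <<= 1;			// 0x0008, then 0x0010
 *	} while ((stage & pipeline) == 0);	// stops at 0x0010, next enabled bit
 *
 * The handler invoked is then zio_pipeline[highbit64(stage) - 1].
 */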
428870ff | 1406 | static zio_pipe_stage_t *zio_pipeline[]; |
34dc7c2f | 1407 | |
da6b4005 NB |
1408 | /* |
1409 | * zio_execute() is a wrapper around the static function | |
1410 | * __zio_execute() so that we can force __zio_execute() to be | |
1411 | * inlined. This reduces stack overhead which is important | |
1412 | * because __zio_execute() is called recursively in several zio | |
1413 | * code paths. zio_execute() itself cannot be inlined because | |
1414 | * it is externally visible. | |
1415 | */ | |
b128c09f BB |
1416 | void |
1417 | zio_execute(zio_t *zio) | |
da6b4005 | 1418 | { |
92119cc2 BB |
1419 | fstrans_cookie_t cookie; |
1420 | ||
1421 | cookie = spl_fstrans_mark(); | |
da6b4005 | 1422 | __zio_execute(zio); |
92119cc2 | 1423 | spl_fstrans_unmark(cookie); |
da6b4005 NB |
1424 | } |
1425 | ||
b58986ee BB |
1426 | /* |
1427 | * Used to determine if in the current context the stack is sized large | |
1428 | * enough to allow zio_execute() to be called recursively. A minimum | |
1429 | * stack size of 16K is required to avoid needing to re-dispatch the zio. | |
1430 | */ | |
1431 | boolean_t | |
1432 | zio_execute_stack_check(zio_t *zio) | |
1433 | { | |
1434 | #if !defined(HAVE_LARGE_STACKS) | |
1435 | dsl_pool_t *dp = spa_get_dsl(zio->io_spa); | |
1436 | ||
1437 | /* Executing in txg_sync_thread() context. */ | |
1438 | if (dp && curthread == dp->dp_tx.tx_sync_thread) | |
1439 | return (B_TRUE); | |
1440 | ||
1441 | /* Pool initialization outside of zio_taskq context. */ | |
1442 | if (dp && spa_is_initializing(dp->dp_spa) && | |
1443 | !zio_taskq_member(zio, ZIO_TASKQ_ISSUE) && | |
1444 | !zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH)) | |
1445 | return (B_TRUE); | |
1446 | #endif /* HAVE_LARGE_STACKS */ | |
1447 | ||
1448 | return (B_FALSE); | |
1449 | } | |
1450 | ||
da6b4005 NB |
1451 | __attribute__((always_inline)) |
1452 | static inline void | |
1453 | __zio_execute(zio_t *zio) | |
b128c09f BB |
1454 | { |
1455 | zio->io_executor = curthread; | |
34dc7c2f | 1456 | |
b128c09f | 1457 | while (zio->io_stage < ZIO_STAGE_DONE) { |
428870ff BB |
1458 | enum zio_stage pipeline = zio->io_pipeline; |
1459 | enum zio_stage stage = zio->io_stage; | |
b128c09f | 1460 | int rv; |
34dc7c2f | 1461 | |
b128c09f | 1462 | ASSERT(!MUTEX_HELD(&zio->io_lock)); |
428870ff BB |
1463 | ASSERT(ISP2(stage)); |
1464 | ASSERT(zio->io_stall == NULL); | |
34dc7c2f | 1465 | |
428870ff BB |
1466 | do { |
1467 | stage <<= 1; | |
1468 | } while ((stage & pipeline) == 0); | |
b128c09f BB |
1469 | |
1470 | ASSERT(stage <= ZIO_STAGE_DONE); | |
34dc7c2f BB |
1471 | |
1472 | /* | |
b128c09f BB |
1473 | * If we are in interrupt context and this pipeline stage |
1474 | * will grab a config lock that is held across I/O, | |
428870ff BB |
1475 | * or may wait for an I/O that needs an interrupt thread |
1476 | * to complete, issue async to avoid deadlock. | |
1477 | * | |
1478 | * For VDEV_IO_START, we cut in line so that the io will | |
1479 | * be sent to disk promptly. | |
34dc7c2f | 1480 | */ |
91579709 BB |
1481 | if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL && |
1482 | zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { | |
b58986ee BB |
1483 | boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? |
1484 | zio_requeue_io_start_cut_in_line : B_FALSE; | |
91579709 BB |
1485 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); |
1486 | return; | |
1487 | } | |
1488 | ||
1489 | /* | |
b58986ee BB |
1490 | * If the current context doesn't have large enough stacks |
1491 | * the zio must be issued asynchronously to prevent overflow. | |
91579709 | 1492 | */ |
b58986ee BB |
1493 | if (zio_execute_stack_check(zio)) { |
1494 | boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ? | |
1495 | zio_requeue_io_start_cut_in_line : B_FALSE; | |
428870ff | 1496 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); |
b128c09f | 1497 | return; |
34dc7c2f BB |
1498 | } |
1499 | ||
b128c09f | 1500 | zio->io_stage = stage; |
9bd274dd | 1501 | rv = zio_pipeline[highbit64(stage) - 1](zio); |
34dc7c2f | 1502 | |
b128c09f BB |
1503 | if (rv == ZIO_PIPELINE_STOP) |
1504 | return; | |
34dc7c2f | 1505 | |
b128c09f BB |
1506 | ASSERT(rv == ZIO_PIPELINE_CONTINUE); |
1507 | } | |
34dc7c2f BB |
1508 | } |
1509 | ||
da6b4005 | 1510 | |
b128c09f BB |
1511 | /* |
1512 | * ========================================================================== | |
1513 | * Initiate I/O, either sync or async | |
1514 | * ========================================================================== | |
1515 | */ | |
1516 | int | |
1517 | zio_wait(zio_t *zio) | |
34dc7c2f | 1518 | { |
b128c09f | 1519 | int error; |
34dc7c2f | 1520 | |
b128c09f BB |
1521 | ASSERT(zio->io_stage == ZIO_STAGE_OPEN); |
1522 | ASSERT(zio->io_executor == NULL); | |
34dc7c2f | 1523 | |
b128c09f | 1524 | zio->io_waiter = curthread; |
34dc7c2f | 1525 | |
da6b4005 | 1526 | __zio_execute(zio); |
34dc7c2f | 1527 | |
b128c09f | 1528 | mutex_enter(&zio->io_lock); |
72f53c56 | 1529 | while (zio->io_executor != NULL) |
72938d69 | 1530 | cv_wait_io(&zio->io_cv, &zio->io_lock); |
b128c09f | 1531 | mutex_exit(&zio->io_lock); |
34dc7c2f | 1532 | |
b128c09f BB |
1533 | error = zio->io_error; |
1534 | zio_destroy(zio); | |
34dc7c2f | 1535 | |
b128c09f BB |
1536 | return (error); |
1537 | } | |
34dc7c2f | 1538 | |
b128c09f BB |
1539 | void |
1540 | zio_nowait(zio_t *zio) | |
1541 | { | |
1542 | ASSERT(zio->io_executor == NULL); | |
34dc7c2f | 1543 | |
d164b209 BB |
1544 | if (zio->io_child_type == ZIO_CHILD_LOGICAL && |
1545 | zio_unique_parent(zio) == NULL) { | |
8878261f BB |
1546 | zio_t *pio; |
1547 | ||
34dc7c2f | 1548 | /* |
b128c09f | 1549 | * This is a logical async I/O with no parent to wait for it. |
9babb374 BB |
1550 | * We add it to the spa_async_root_zio "Godfather" I/O which |
1551 | * will ensure it completes prior to unloading the pool. |
34dc7c2f | 1552 | */ |
b128c09f | 1553 | spa_t *spa = zio->io_spa; |
8878261f BB |
1554 | kpreempt_disable(); |
1555 | pio = spa->spa_async_zio_root[CPU_SEQID]; | |
1556 | kpreempt_enable(); | |
9babb374 | 1557 | |
8878261f | 1558 | zio_add_child(pio, zio); |
b128c09f | 1559 | } |
34dc7c2f | 1560 | |
da6b4005 | 1561 | __zio_execute(zio); |
b128c09f | 1562 | } |
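/*
 * Illustrative usage sketch (not part of the original source).  A caller
 * that needs the result immediately waits on the zio; fire-and-forget
 * callers use zio_nowait(), and a parentless logical zio is adopted by the
 * per-CPU "Godfather" root zio above.  The names spa, bp, buf, size, zb,
 * done_cb and cb_arg stand for caller-supplied values here.
 *
 *	int error = zio_wait(zio_read(NULL, spa, bp, buf, size,
 *	    NULL, NULL, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb));
 *
 *	zio_nowait(zio_read(NULL, spa, bp, buf, size,
 *	    done_cb, cb_arg, ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, zb));
 */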
34dc7c2f | 1563 | |
b128c09f BB |
1564 | /* |
1565 | * ========================================================================== | |
1566 | * Reexecute or suspend/resume failed I/O | |
1567 | * ========================================================================== | |
1568 | */ | |
34dc7c2f | 1569 | |
b128c09f BB |
1570 | static void |
1571 | zio_reexecute(zio_t *pio) | |
1572 | { | |
d164b209 | 1573 | zio_t *cio, *cio_next; |
d6320ddb | 1574 | int c, w; |
d164b209 BB |
1575 | |
1576 | ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); | |
1577 | ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); | |
9babb374 BB |
1578 | ASSERT(pio->io_gang_leader == NULL); |
1579 | ASSERT(pio->io_gang_tree == NULL); | |
34dc7c2f | 1580 | |
b128c09f BB |
1581 | pio->io_flags = pio->io_orig_flags; |
1582 | pio->io_stage = pio->io_orig_stage; | |
1583 | pio->io_pipeline = pio->io_orig_pipeline; | |
1584 | pio->io_reexecute = 0; | |
03c6040b | 1585 | pio->io_flags |= ZIO_FLAG_REEXECUTED; |
b128c09f | 1586 | pio->io_error = 0; |
d6320ddb | 1587 | for (w = 0; w < ZIO_WAIT_TYPES; w++) |
d164b209 | 1588 | pio->io_state[w] = 0; |
d6320ddb | 1589 | for (c = 0; c < ZIO_CHILD_TYPES; c++) |
b128c09f | 1590 | pio->io_child_error[c] = 0; |
34dc7c2f | 1591 | |
428870ff BB |
1592 | if (IO_IS_ALLOCATING(pio)) |
1593 | BP_ZERO(pio->io_bp); | |
34dc7c2f | 1594 | |
b128c09f BB |
1595 | /* |
1596 | * As we reexecute pio's children, new children could be created. | |
d164b209 | 1597 | * New children go to the head of pio's io_child_list, however, |
b128c09f | 1598 | * so we will (correctly) not reexecute them. The key is that |
d164b209 BB |
1599 | * the remainder of pio's io_child_list, from 'cio_next' onward, |
1600 | * cannot be affected by any side effects of reexecuting 'cio'. | |
b128c09f | 1601 | */ |
d164b209 BB |
1602 | for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) { |
1603 | cio_next = zio_walk_children(pio); | |
b128c09f | 1604 | mutex_enter(&pio->io_lock); |
d6320ddb | 1605 | for (w = 0; w < ZIO_WAIT_TYPES; w++) |
d164b209 | 1606 | pio->io_children[cio->io_child_type][w]++; |
b128c09f | 1607 | mutex_exit(&pio->io_lock); |
d164b209 | 1608 | zio_reexecute(cio); |
34dc7c2f | 1609 | } |
34dc7c2f | 1610 | |
b128c09f BB |
1611 | /* |
1612 | * Now that all children have been reexecuted, execute the parent. | |
9babb374 BB |
1613 | * We don't reexecute "The Godfather" I/O here as it's the |
1614 | * responsibility of the caller to wait on him. | |
b128c09f | 1615 | */ |
9babb374 | 1616 | if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) |
da6b4005 | 1617 | __zio_execute(pio); |
34dc7c2f BB |
1618 | } |
1619 | ||
b128c09f BB |
1620 | void |
1621 | zio_suspend(spa_t *spa, zio_t *zio) | |
34dc7c2f | 1622 | { |
b128c09f BB |
1623 | if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) |
1624 | fm_panic("Pool '%s' has encountered an uncorrectable I/O " | |
1625 | "failure and the failure mode property for this pool " | |
1626 | "is set to panic.", spa_name(spa)); | |
34dc7c2f | 1627 | |
bf89c199 BB |
1628 | cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O " |
1629 | "failure and has been suspended.\n", spa_name(spa)); | |
1630 | ||
b128c09f | 1631 | zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0); |
34dc7c2f | 1632 | |
b128c09f | 1633 | mutex_enter(&spa->spa_suspend_lock); |
34dc7c2f | 1634 | |
b128c09f | 1635 | if (spa->spa_suspend_zio_root == NULL) |
9babb374 BB |
1636 | spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, |
1637 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | | |
1638 | ZIO_FLAG_GODFATHER); | |
34dc7c2f | 1639 | |
b128c09f | 1640 | spa->spa_suspended = B_TRUE; |
34dc7c2f | 1641 | |
b128c09f | 1642 | if (zio != NULL) { |
9babb374 | 1643 | ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); |
b128c09f BB |
1644 | ASSERT(zio != spa->spa_suspend_zio_root); |
1645 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
d164b209 | 1646 | ASSERT(zio_unique_parent(zio) == NULL); |
b128c09f BB |
1647 | ASSERT(zio->io_stage == ZIO_STAGE_DONE); |
1648 | zio_add_child(spa->spa_suspend_zio_root, zio); | |
1649 | } | |
34dc7c2f | 1650 | |
b128c09f BB |
1651 | mutex_exit(&spa->spa_suspend_lock); |
1652 | } | |
34dc7c2f | 1653 | |
9babb374 | 1654 | int |
b128c09f BB |
1655 | zio_resume(spa_t *spa) |
1656 | { | |
9babb374 | 1657 | zio_t *pio; |
34dc7c2f BB |
1658 | |
1659 | /* | |
b128c09f | 1660 | * Reexecute all previously suspended i/o. |
34dc7c2f | 1661 | */ |
b128c09f BB |
1662 | mutex_enter(&spa->spa_suspend_lock); |
1663 | spa->spa_suspended = B_FALSE; | |
1664 | cv_broadcast(&spa->spa_suspend_cv); | |
1665 | pio = spa->spa_suspend_zio_root; | |
1666 | spa->spa_suspend_zio_root = NULL; | |
1667 | mutex_exit(&spa->spa_suspend_lock); | |
1668 | ||
1669 | if (pio == NULL) | |
9babb374 | 1670 | return (0); |
34dc7c2f | 1671 | |
9babb374 BB |
1672 | zio_reexecute(pio); |
1673 | return (zio_wait(pio)); | |
b128c09f BB |
1674 | } |
1675 | ||
1676 | void | |
1677 | zio_resume_wait(spa_t *spa) | |
1678 | { | |
1679 | mutex_enter(&spa->spa_suspend_lock); | |
1680 | while (spa_suspended(spa)) | |
1681 | cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); | |
1682 | mutex_exit(&spa->spa_suspend_lock); | |
34dc7c2f BB |
1683 | } |
1684 | ||
1685 | /* | |
1686 | * ========================================================================== | |
b128c09f BB |
1687 | * Gang blocks. |
1688 | * | |
1689 | * A gang block is a collection of small blocks that looks to the DMU | |
1690 | * like one large block. When zio_dva_allocate() cannot find a block | |
1691 | * of the requested size, due to either severe fragmentation or the pool | |
1692 | * being nearly full, it calls zio_write_gang_block() to construct the | |
1693 | * block from smaller fragments. | |
1694 | * | |
1695 | * A gang block consists of a gang header (zio_gbh_phys_t) and up to | |
1696 | * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like | |
1697 | * an indirect block: it's an array of block pointers. It consumes | |
1698 | * only one sector and hence is allocatable regardless of fragmentation. | |
1699 | * The gang header's bps point to its gang members, which hold the data. | |
1700 | * | |
1701 | * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> | |
1702 | * as the verifier to ensure uniqueness of the SHA256 checksum. | |
1703 | * Critically, the gang block bp's blk_cksum is the checksum of the data, | |
1704 | * not the gang header. This ensures that data block signatures (needed for | |
1705 | * deduplication) are independent of how the block is physically stored. | |
1706 | * | |
1707 | * Gang blocks can be nested: a gang member may itself be a gang block. | |
1708 | * Thus every gang block is a tree in which root and all interior nodes are | |
1709 | * gang headers, and the leaves are normal blocks that contain user data. | |
1710 | * The root of the gang tree is called the gang leader. | |
1711 | * | |
1712 | * To perform any operation (read, rewrite, free, claim) on a gang block, | |
1713 | * zio_gang_assemble() first assembles the gang tree (minus data leaves) | |
1714 | * in the io_gang_tree field of the original logical i/o by recursively | |
1715 | * reading the gang leader and all gang headers below it. This yields | |
1716 | * an in-core tree containing the contents of every gang header and the | |
1717 | * bps for every constituent of the gang block. | |
1718 | * | |
1719 | * With the gang tree now assembled, zio_gang_issue() just walks the gang tree | |
1720 | * and invokes a callback on each bp. To free a gang block, zio_gang_issue() | |
1721 | * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. | |
1722 | * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). | |
1723 | * zio_read_gang() is a wrapper around zio_read() that omits reading gang | |
1724 | * headers, since we already have those in io_gang_tree. zio_rewrite_gang() | |
1725 | * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() | |
1726 | * of the gang header plus zio_checksum_compute() of the data to update the | |
1727 | * gang header's blk_cksum as described above. | |
1728 | * | |
1729 | * The two-phase assemble/issue model solves the problem of partial failure -- | |
1730 | * what if you'd freed part of a gang block but then couldn't read the | |
1731 | * gang header for another part? Assembling the entire gang tree first | |
1732 | * ensures that all the necessary gang header I/O has succeeded before | |
1733 | * starting the actual work of free, claim, or write. Once the gang tree | |
1734 | * is assembled, free and claim are in-memory operations that cannot fail. | |
1735 | * | |
1736 | * In the event that a gang write fails, zio_dva_unallocate() walks the | |
1737 | * gang tree to immediately free (i.e. insert back into the space map) | |
1738 | * everything we've allocated. This ensures that we don't get ENOSPC | |
1739 | * errors during repeated suspend/resume cycles due to a flaky device. | |
1740 | * | |
1741 | * Gang rewrites only happen during sync-to-convergence. If we can't assemble | |
1742 | * the gang tree, we won't modify the block, so we can safely defer the free | |
1743 | * (knowing that the block is still intact). If we *can* assemble the gang | |
1744 | * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free | |
1745 | * each constituent bp and we can allocate a new block on the next sync pass. | |
1746 | * | |
1747 | * In all cases, the gang tree allows complete recovery from partial failure. | |
34dc7c2f BB |
1748 | * ========================================================================== |
1749 | */ | |
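/*
 * Illustrative sketch (not part of the original source): a two-level gang
 * tree for a hypothetical 128K logical block whose data had to be split
 * four ways.  Interior nodes are gang headers (zio_gbh_phys_t, one sector
 * each, at most SPA_GBH_NBLKPTRS == 3 children); leaves hold user data.
 *
 *	      gang leader bp (gang bit set)
 *	                 |
 *	            gang header
 *	          /      |       \
 *	      data     data     gang header
 *	     (48K)    (48K)     /        \
 *	                      data      data
 *	                     (16K)     (16K)
 *
 * zio_gang_assemble() reads the two headers into io_gang_tree;
 * zio_gang_issue() then walks the tree and issues the four data leaves.
 */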
b128c09f BB |
1750 | |
1751 | static zio_t * | |
1752 | zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) | |
34dc7c2f | 1753 | { |
b128c09f BB |
1754 | if (gn != NULL) |
1755 | return (pio); | |
34dc7c2f | 1756 | |
b128c09f BB |
1757 | return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp), |
1758 | NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), | |
1759 | &pio->io_bookmark)); | |
1760 | } | |
1761 | ||
1762 | zio_t * | |
1763 | zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) | |
1764 | { | |
1765 | zio_t *zio; | |
1766 | ||
1767 | if (gn != NULL) { | |
1768 | zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, | |
1769 | gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority, | |
1770 | ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); | |
34dc7c2f | 1771 | /* |
b128c09f BB |
1772 | * As we rewrite each gang header, the pipeline will compute |
1773 | * a new gang block header checksum for it; but no one will | |
1774 | * compute a new data checksum, so we do that here. The one | |
1775 | * exception is the gang leader: the pipeline already computed | |
1776 | * its data checksum because that stage precedes gang assembly. | |
1777 | * (Presently, nothing actually uses interior data checksums; | |
1778 | * this is just good hygiene.) | |
34dc7c2f | 1779 | */ |
9babb374 | 1780 | if (gn != pio->io_gang_leader->io_gang_tree) { |
b128c09f BB |
1781 | zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), |
1782 | data, BP_GET_PSIZE(bp)); | |
1783 | } | |
428870ff BB |
1784 | /* |
1785 | * If we are here to damage data for testing purposes, | |
1786 | * leave the GBH alone so that we can detect the damage. | |
1787 | */ | |
1788 | if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) | |
1789 | zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; | |
34dc7c2f | 1790 | } else { |
b128c09f BB |
1791 | zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, |
1792 | data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority, | |
1793 | ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); | |
34dc7c2f BB |
1794 | } |
1795 | ||
b128c09f BB |
1796 | return (zio); |
1797 | } | |
34dc7c2f | 1798 | |
b128c09f BB |
1799 | /* ARGSUSED */ |
1800 | zio_t * | |
1801 | zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) | |
1802 | { | |
428870ff BB |
1803 | return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, |
1804 | ZIO_GANG_CHILD_FLAGS(pio))); | |
34dc7c2f BB |
1805 | } |
1806 | ||
b128c09f BB |
1807 | /* ARGSUSED */ |
1808 | zio_t * | |
1809 | zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) | |
34dc7c2f | 1810 | { |
b128c09f BB |
1811 | return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, |
1812 | NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); | |
1813 | } | |
1814 | ||
1815 | static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { | |
1816 | NULL, | |
1817 | zio_read_gang, | |
1818 | zio_rewrite_gang, | |
1819 | zio_free_gang, | |
1820 | zio_claim_gang, | |
1821 | NULL | |
1822 | }; | |
34dc7c2f | 1823 | |
b128c09f | 1824 | static void zio_gang_tree_assemble_done(zio_t *zio); |
34dc7c2f | 1825 | |
b128c09f BB |
1826 | static zio_gang_node_t * |
1827 | zio_gang_node_alloc(zio_gang_node_t **gnpp) | |
1828 | { | |
1829 | zio_gang_node_t *gn; | |
34dc7c2f | 1830 | |
b128c09f | 1831 | ASSERT(*gnpp == NULL); |
34dc7c2f | 1832 | |
79c76d5b | 1833 | gn = kmem_zalloc(sizeof (*gn), KM_SLEEP); |
b128c09f BB |
1834 | gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); |
1835 | *gnpp = gn; | |
34dc7c2f | 1836 | |
b128c09f | 1837 | return (gn); |
34dc7c2f BB |
1838 | } |
1839 | ||
34dc7c2f | 1840 | static void |
b128c09f | 1841 | zio_gang_node_free(zio_gang_node_t **gnpp) |
34dc7c2f | 1842 | { |
b128c09f | 1843 | zio_gang_node_t *gn = *gnpp; |
d6320ddb | 1844 | int g; |
34dc7c2f | 1845 | |
d6320ddb | 1846 | for (g = 0; g < SPA_GBH_NBLKPTRS; g++) |
b128c09f BB |
1847 | ASSERT(gn->gn_child[g] == NULL); |
1848 | ||
1849 | zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); | |
1850 | kmem_free(gn, sizeof (*gn)); | |
1851 | *gnpp = NULL; | |
34dc7c2f BB |
1852 | } |
1853 | ||
b128c09f BB |
1854 | static void |
1855 | zio_gang_tree_free(zio_gang_node_t **gnpp) | |
34dc7c2f | 1856 | { |
b128c09f | 1857 | zio_gang_node_t *gn = *gnpp; |
d6320ddb | 1858 | int g; |
34dc7c2f | 1859 | |
b128c09f BB |
1860 | if (gn == NULL) |
1861 | return; | |
34dc7c2f | 1862 | |
d6320ddb | 1863 | for (g = 0; g < SPA_GBH_NBLKPTRS; g++) |
b128c09f | 1864 | zio_gang_tree_free(&gn->gn_child[g]); |
34dc7c2f | 1865 | |
b128c09f | 1866 | zio_gang_node_free(gnpp); |
34dc7c2f BB |
1867 | } |
1868 | ||
b128c09f | 1869 | static void |
9babb374 | 1870 | zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) |
34dc7c2f | 1871 | { |
b128c09f BB |
1872 | zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); |
1873 | ||
9babb374 | 1874 | ASSERT(gio->io_gang_leader == gio); |
b128c09f | 1875 | ASSERT(BP_IS_GANG(bp)); |
34dc7c2f | 1876 | |
9babb374 | 1877 | zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh, |
b128c09f | 1878 | SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn, |
9babb374 | 1879 | gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); |
b128c09f | 1880 | } |
34dc7c2f | 1881 | |
b128c09f BB |
1882 | static void |
1883 | zio_gang_tree_assemble_done(zio_t *zio) | |
1884 | { | |
9babb374 | 1885 | zio_t *gio = zio->io_gang_leader; |
b128c09f BB |
1886 | zio_gang_node_t *gn = zio->io_private; |
1887 | blkptr_t *bp = zio->io_bp; | |
d6320ddb | 1888 | int g; |
34dc7c2f | 1889 | |
9babb374 | 1890 | ASSERT(gio == zio_unique_parent(zio)); |
428870ff | 1891 | ASSERT(zio->io_child_count == 0); |
34dc7c2f | 1892 | |
b128c09f BB |
1893 | if (zio->io_error) |
1894 | return; | |
34dc7c2f | 1895 | |
b128c09f BB |
1896 | if (BP_SHOULD_BYTESWAP(bp)) |
1897 | byteswap_uint64_array(zio->io_data, zio->io_size); | |
34dc7c2f | 1898 | |
b128c09f BB |
1899 | ASSERT(zio->io_data == gn->gn_gbh); |
1900 | ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); | |
428870ff | 1901 | ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); |
34dc7c2f | 1902 | |
d6320ddb | 1903 | for (g = 0; g < SPA_GBH_NBLKPTRS; g++) { |
b128c09f BB |
1904 | blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; |
1905 | if (!BP_IS_GANG(gbp)) | |
1906 | continue; | |
9babb374 | 1907 | zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); |
b128c09f | 1908 | } |
34dc7c2f BB |
1909 | } |
1910 | ||
b128c09f BB |
1911 | static void |
1912 | zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data) | |
34dc7c2f | 1913 | { |
9babb374 | 1914 | zio_t *gio = pio->io_gang_leader; |
b128c09f | 1915 | zio_t *zio; |
d6320ddb | 1916 | int g; |
34dc7c2f | 1917 | |
b128c09f | 1918 | ASSERT(BP_IS_GANG(bp) == !!gn); |
9babb374 BB |
1919 | ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); |
1920 | ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); | |
34dc7c2f | 1921 | |
b128c09f BB |
1922 | /* |
1923 | * If you're a gang header, your data is in gn->gn_gbh. | |
1924 | * If you're a gang member, your data is in 'data' and gn == NULL. | |
1925 | */ | |
9babb374 | 1926 | zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data); |
34dc7c2f | 1927 | |
b128c09f | 1928 | if (gn != NULL) { |
428870ff | 1929 | ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); |
34dc7c2f | 1930 | |
d6320ddb | 1931 | for (g = 0; g < SPA_GBH_NBLKPTRS; g++) { |
b128c09f BB |
1932 | blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; |
1933 | if (BP_IS_HOLE(gbp)) | |
1934 | continue; | |
1935 | zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data); | |
1936 | data = (char *)data + BP_GET_PSIZE(gbp); | |
1937 | } | |
34dc7c2f BB |
1938 | } |
1939 | ||
9babb374 BB |
1940 | if (gn == gio->io_gang_tree) |
1941 | ASSERT3P((char *)gio->io_data + gio->io_size, ==, data); | |
34dc7c2f | 1942 | |
b128c09f BB |
1943 | if (zio != pio) |
1944 | zio_nowait(zio); | |
34dc7c2f BB |
1945 | } |
1946 | ||
1947 | static int | |
b128c09f | 1948 | zio_gang_assemble(zio_t *zio) |
34dc7c2f | 1949 | { |
b128c09f | 1950 | blkptr_t *bp = zio->io_bp; |
34dc7c2f | 1951 | |
9babb374 BB |
1952 | ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); |
1953 | ASSERT(zio->io_child_type > ZIO_CHILD_GANG); | |
1954 | ||
1955 | zio->io_gang_leader = zio; | |
34dc7c2f | 1956 | |
b128c09f | 1957 | zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); |
34dc7c2f BB |
1958 | |
1959 | return (ZIO_PIPELINE_CONTINUE); | |
1960 | } | |
1961 | ||
1962 | static int | |
b128c09f | 1963 | zio_gang_issue(zio_t *zio) |
34dc7c2f | 1964 | { |
b128c09f | 1965 | blkptr_t *bp = zio->io_bp; |
34dc7c2f | 1966 | |
b128c09f BB |
1967 | if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE)) |
1968 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f | 1969 | |
9babb374 BB |
1970 | ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); |
1971 | ASSERT(zio->io_child_type > ZIO_CHILD_GANG); | |
34dc7c2f | 1972 | |
b128c09f | 1973 | if (zio->io_child_error[ZIO_CHILD_GANG] == 0) |
9babb374 | 1974 | zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data); |
b128c09f | 1975 | else |
9babb374 | 1976 | zio_gang_tree_free(&zio->io_gang_tree); |
34dc7c2f | 1977 | |
b128c09f | 1978 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; |
34dc7c2f BB |
1979 | |
1980 | return (ZIO_PIPELINE_CONTINUE); | |
1981 | } | |
1982 | ||
1983 | static void | |
b128c09f | 1984 | zio_write_gang_member_ready(zio_t *zio) |
34dc7c2f | 1985 | { |
d164b209 | 1986 | zio_t *pio = zio_unique_parent(zio); |
34dc7c2f BB |
1987 | dva_t *cdva = zio->io_bp->blk_dva; |
1988 | dva_t *pdva = pio->io_bp->blk_dva; | |
1989 | uint64_t asize; | |
d6320ddb | 1990 | int d; |
d1d7e268 | 1991 | ASSERTV(zio_t *gio = zio->io_gang_leader); |
34dc7c2f | 1992 | |
b128c09f BB |
1993 | if (BP_IS_HOLE(zio->io_bp)) |
1994 | return; | |
1995 | ||
1996 | ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); | |
1997 | ||
1998 | ASSERT(zio->io_child_type == ZIO_CHILD_GANG); | |
428870ff BB |
1999 | ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); |
2000 | ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); | |
2001 | ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); | |
34dc7c2f | 2002 | ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); |
34dc7c2f BB |
2003 | |
2004 | mutex_enter(&pio->io_lock); | |
d6320ddb | 2005 | for (d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { |
34dc7c2f BB |
2006 | ASSERT(DVA_GET_GANG(&pdva[d])); |
2007 | asize = DVA_GET_ASIZE(&pdva[d]); | |
2008 | asize += DVA_GET_ASIZE(&cdva[d]); | |
2009 | DVA_SET_ASIZE(&pdva[d], asize); | |
2010 | } | |
2011 | mutex_exit(&pio->io_lock); | |
2012 | } | |
2013 | ||
2014 | static int | |
b128c09f | 2015 | zio_write_gang_block(zio_t *pio) |
34dc7c2f | 2016 | { |
b128c09f BB |
2017 | spa_t *spa = pio->io_spa; |
2018 | blkptr_t *bp = pio->io_bp; | |
9babb374 | 2019 | zio_t *gio = pio->io_gang_leader; |
b128c09f BB |
2020 | zio_t *zio; |
2021 | zio_gang_node_t *gn, **gnpp; | |
34dc7c2f | 2022 | zio_gbh_phys_t *gbh; |
b128c09f BB |
2023 | uint64_t txg = pio->io_txg; |
2024 | uint64_t resid = pio->io_size; | |
2025 | uint64_t lsize; | |
428870ff BB |
2026 | int copies = gio->io_prop.zp_copies; |
2027 | int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); | |
b128c09f | 2028 | zio_prop_t zp; |
d6320ddb | 2029 | int g, error; |
34dc7c2f | 2030 | |
428870ff BB |
2031 | error = metaslab_alloc(spa, spa_normal_class(spa), SPA_GANGBLOCKSIZE, |
2032 | bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, | |
b128c09f | 2033 | METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER); |
34dc7c2f | 2034 | if (error) { |
b128c09f | 2035 | pio->io_error = error; |
34dc7c2f BB |
2036 | return (ZIO_PIPELINE_CONTINUE); |
2037 | } | |
2038 | ||
9babb374 BB |
2039 | if (pio == gio) { |
2040 | gnpp = &gio->io_gang_tree; | |
b128c09f BB |
2041 | } else { |
2042 | gnpp = pio->io_private; | |
2043 | ASSERT(pio->io_ready == zio_write_gang_member_ready); | |
34dc7c2f BB |
2044 | } |
2045 | ||
b128c09f BB |
2046 | gn = zio_gang_node_alloc(gnpp); |
2047 | gbh = gn->gn_gbh; | |
2048 | bzero(gbh, SPA_GANGBLOCKSIZE); | |
34dc7c2f | 2049 | |
b128c09f BB |
2050 | /* |
2051 | * Create the gang header. | |
2052 | */ | |
2053 | zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL, | |
2054 | pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); | |
34dc7c2f | 2055 | |
b128c09f BB |
2056 | /* |
2057 | * Create and nowait the gang children. | |
2058 | */ | |
d6320ddb | 2059 | for (g = 0; resid != 0; resid -= lsize, g++) { |
b128c09f BB |
2060 | lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), |
2061 | SPA_MINBLOCKSIZE); | |
2062 | ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); | |
2063 | ||
9babb374 | 2064 | zp.zp_checksum = gio->io_prop.zp_checksum; |
b128c09f BB |
2065 | zp.zp_compress = ZIO_COMPRESS_OFF; |
2066 | zp.zp_type = DMU_OT_NONE; | |
2067 | zp.zp_level = 0; | |
428870ff | 2068 | zp.zp_copies = gio->io_prop.zp_copies; |
03c6040b GW |
2069 | zp.zp_dedup = B_FALSE; |
2070 | zp.zp_dedup_verify = B_FALSE; | |
2071 | zp.zp_nopwrite = B_FALSE; | |
b128c09f BB |
2072 | |
2073 | zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g], | |
2074 | (char *)pio->io_data + (pio->io_size - resid), lsize, &zp, | |
e8b96c60 | 2075 | zio_write_gang_member_ready, NULL, NULL, &gn->gn_child[g], |
b128c09f BB |
2076 | pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), |
2077 | &pio->io_bookmark)); | |
2078 | } | |
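	/*
	 * Worked example (illustrative, not part of the original source) of
	 * the split performed by the loop above, assuming
	 * SPA_GBH_NBLKPTRS == 3, SPA_MINBLOCKSIZE == 512 and a hypothetical
	 * pio->io_size of 100K (102400 bytes):
	 *
	 *	g == 0: lsize = P2ROUNDUP(102400 / 3, 512) = 34304, resid -> 68096
	 *	g == 1: lsize = P2ROUNDUP(68096 / 2, 512)  = 34304, resid -> 33792
	 *	g == 2: lsize = P2ROUNDUP(33792 / 1, 512)  = 33792, resid -> 0
	 *
	 * The three members sum to the original 102400 bytes; each member may
	 * itself gang again if its own allocation fails.
	 */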
34dc7c2f BB |
2079 | |
2080 | /* | |
b128c09f | 2081 | * Set pio's pipeline to just wait for zio to finish. |
34dc7c2f | 2082 | */ |
b128c09f BB |
2083 | pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; |
2084 | ||
920dd524 ED |
2085 | /* |
2086 | * We didn't allocate this bp, so make sure it doesn't get unmarked. | |
2087 | */ | |
2088 | pio->io_flags &= ~ZIO_FLAG_FASTWRITE; | |
2089 | ||
b128c09f BB |
2090 | zio_nowait(zio); |
2091 | ||
2092 | return (ZIO_PIPELINE_CONTINUE); | |
34dc7c2f BB |
2093 | } |
2094 | ||
03c6040b GW |
2095 | /* |
2096 | * The zio_nop_write stage in the pipeline determines if allocating | |
2097 | * a new bp is necessary. By leveraging a cryptographically secure checksum, | |
2098 | * such as SHA256, we can compare the checksums of the new data and the old | |
2099 | * to determine if allocating a new block is required. The nopwrite | |
2100 | * feature can handle writes in either syncing or open context (i.e. zil | |
2101 | * writes) and as a result is mutually exclusive with dedup. | |
2102 | */ | |
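/*
 * Illustrative sketch (not part of the original source) of when the stage
 * below fires.  Suppose a file block is rewritten with byte-identical data
 * on a dataset using checksum=sha256 and compression=lz4:
 *
 *	- bp_orig still points at the existing on-disk block;
 *	- the write pipeline has already compressed the new data and computed
 *	  its sha256 into bp->blk_cksum;
 *	- checksum, compression, dedup and copies all match bp_orig and the
 *	  checksums compare equal, so *bp = *bp_orig and no new block is
 *	  allocated or written.
 *
 * If any property differs (e.g. the dataset uses fletcher4, which is not
 * dedup-safe), the stage simply falls through and a normal allocating
 * write proceeds.
 */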
2103 | static int | |
2104 | zio_nop_write(zio_t *zio) | |
2105 | { | |
2106 | blkptr_t *bp = zio->io_bp; | |
2107 | blkptr_t *bp_orig = &zio->io_bp_orig; | |
2108 | zio_prop_t *zp = &zio->io_prop; | |
2109 | ||
2110 | ASSERT(BP_GET_LEVEL(bp) == 0); | |
2111 | ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); | |
2112 | ASSERT(zp->zp_nopwrite); | |
2113 | ASSERT(!zp->zp_dedup); | |
2114 | ASSERT(zio->io_bp_override == NULL); | |
2115 | ASSERT(IO_IS_ALLOCATING(zio)); | |
2116 | ||
2117 | /* | |
2118 | * Check to see if the original bp and the new bp have matching | |
2119 | * characteristics (i.e. same checksum, compression algorithms, etc). | |
2120 | * If they don't then just continue with the pipeline which will | |
2121 | * allocate a new bp. | |
2122 | */ | |
2123 | if (BP_IS_HOLE(bp_orig) || | |
2124 | !zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_dedup || | |
2125 | BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) || | |
2126 | BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) || | |
2127 | BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) || | |
2128 | zp->zp_copies != BP_GET_NDVAS(bp_orig)) | |
2129 | return (ZIO_PIPELINE_CONTINUE); | |
2130 | ||
2131 | /* | |
2132 | * If the checksums match then reset the pipeline so that we | |
2133 | * avoid allocating a new bp and issuing any I/O. | |
2134 | */ | |
2135 | if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) { | |
2136 | ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup); | |
2137 | ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig)); | |
2138 | ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig)); | |
2139 | ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF); | |
2140 | ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop, | |
2141 | sizeof (uint64_t)) == 0); | |
2142 | ||
2143 | *bp = *bp_orig; | |
2144 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; | |
2145 | zio->io_flags |= ZIO_FLAG_NOPWRITE; | |
2146 | } | |
2147 | ||
2148 | return (ZIO_PIPELINE_CONTINUE); | |
2149 | } | |
2150 | ||
34dc7c2f BB |
2151 | /* |
2152 | * ========================================================================== | |
428870ff | 2153 | * Dedup |
34dc7c2f BB |
2154 | * ========================================================================== |
2155 | */ | |
428870ff BB |
2156 | static void |
2157 | zio_ddt_child_read_done(zio_t *zio) | |
2158 | { | |
2159 | blkptr_t *bp = zio->io_bp; | |
2160 | ddt_entry_t *dde = zio->io_private; | |
2161 | ddt_phys_t *ddp; | |
2162 | zio_t *pio = zio_unique_parent(zio); | |
2163 | ||
2164 | mutex_enter(&pio->io_lock); | |
2165 | ddp = ddt_phys_select(dde, bp); | |
2166 | if (zio->io_error == 0) | |
2167 | ddt_phys_clear(ddp); /* this ddp doesn't need repair */ | |
2168 | if (zio->io_error == 0 && dde->dde_repair_data == NULL) | |
2169 | dde->dde_repair_data = zio->io_data; | |
2170 | else | |
2171 | zio_buf_free(zio->io_data, zio->io_size); | |
2172 | mutex_exit(&pio->io_lock); | |
2173 | } | |
2174 | ||
2175 | static int | |
2176 | zio_ddt_read_start(zio_t *zio) | |
2177 | { | |
2178 | blkptr_t *bp = zio->io_bp; | |
d6320ddb | 2179 | int p; |
428870ff BB |
2180 | |
2181 | ASSERT(BP_GET_DEDUP(bp)); | |
2182 | ASSERT(BP_GET_PSIZE(bp) == zio->io_size); | |
2183 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
2184 | ||
2185 | if (zio->io_child_error[ZIO_CHILD_DDT]) { | |
2186 | ddt_t *ddt = ddt_select(zio->io_spa, bp); | |
2187 | ddt_entry_t *dde = ddt_repair_start(ddt, bp); | |
2188 | ddt_phys_t *ddp = dde->dde_phys; | |
2189 | ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); | |
2190 | blkptr_t blk; | |
2191 | ||
2192 | ASSERT(zio->io_vsd == NULL); | |
2193 | zio->io_vsd = dde; | |
2194 | ||
2195 | if (ddp_self == NULL) | |
2196 | return (ZIO_PIPELINE_CONTINUE); | |
2197 | ||
d6320ddb | 2198 | for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { |
428870ff BB |
2199 | if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) |
2200 | continue; | |
2201 | ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, | |
2202 | &blk); | |
2203 | zio_nowait(zio_read(zio, zio->io_spa, &blk, | |
2204 | zio_buf_alloc(zio->io_size), zio->io_size, | |
2205 | zio_ddt_child_read_done, dde, zio->io_priority, | |
2206 | ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE, | |
2207 | &zio->io_bookmark)); | |
2208 | } | |
2209 | return (ZIO_PIPELINE_CONTINUE); | |
2210 | } | |
2211 | ||
2212 | zio_nowait(zio_read(zio, zio->io_spa, bp, | |
2213 | zio->io_data, zio->io_size, NULL, NULL, zio->io_priority, | |
2214 | ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); | |
2215 | ||
2216 | return (ZIO_PIPELINE_CONTINUE); | |
2217 | } | |
2218 | ||
2219 | static int | |
2220 | zio_ddt_read_done(zio_t *zio) | |
2221 | { | |
2222 | blkptr_t *bp = zio->io_bp; | |
2223 | ||
2224 | if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE)) | |
2225 | return (ZIO_PIPELINE_STOP); | |
2226 | ||
2227 | ASSERT(BP_GET_DEDUP(bp)); | |
2228 | ASSERT(BP_GET_PSIZE(bp) == zio->io_size); | |
2229 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
2230 | ||
2231 | if (zio->io_child_error[ZIO_CHILD_DDT]) { | |
2232 | ddt_t *ddt = ddt_select(zio->io_spa, bp); | |
2233 | ddt_entry_t *dde = zio->io_vsd; | |
2234 | if (ddt == NULL) { | |
2235 | ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); | |
2236 | return (ZIO_PIPELINE_CONTINUE); | |
2237 | } | |
2238 | if (dde == NULL) { | |
2239 | zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; | |
2240 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); | |
2241 | return (ZIO_PIPELINE_STOP); | |
2242 | } | |
2243 | if (dde->dde_repair_data != NULL) { | |
2244 | bcopy(dde->dde_repair_data, zio->io_data, zio->io_size); | |
2245 | zio->io_child_error[ZIO_CHILD_DDT] = 0; | |
2246 | } | |
2247 | ddt_repair_done(ddt, dde); | |
2248 | zio->io_vsd = NULL; | |
2249 | } | |
2250 | ||
2251 | ASSERT(zio->io_vsd == NULL); | |
2252 | ||
2253 | return (ZIO_PIPELINE_CONTINUE); | |
2254 | } | |
2255 | ||
2256 | static boolean_t | |
2257 | zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) | |
2258 | { | |
2259 | spa_t *spa = zio->io_spa; | |
d6320ddb | 2260 | int p; |
428870ff BB |
2261 | |
2262 | /* | |
2263 | * Note: we compare the original data, not the transformed data, | |
2264 | * because when zio->io_bp is an override bp, we will not have | |
2265 | * pushed the I/O transforms. That's an important optimization | |
2266 | * because otherwise we'd compress/encrypt all dmu_sync() data twice. | |
2267 | */ | |
d6320ddb | 2268 | for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { |
428870ff BB |
2269 | zio_t *lio = dde->dde_lead_zio[p]; |
2270 | ||
2271 | if (lio != NULL) { | |
2272 | return (lio->io_orig_size != zio->io_orig_size || | |
2273 | bcmp(zio->io_orig_data, lio->io_orig_data, | |
2274 | zio->io_orig_size) != 0); | |
2275 | } | |
2276 | } | |
2277 | ||
d6320ddb | 2278 | for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { |
428870ff BB |
2279 | ddt_phys_t *ddp = &dde->dde_phys[p]; |
2280 | ||
2281 | if (ddp->ddp_phys_birth != 0) { | |
2282 | arc_buf_t *abuf = NULL; | |
2a432414 | 2283 | arc_flags_t aflags = ARC_FLAG_WAIT; |
428870ff BB |
2284 | blkptr_t blk = *zio->io_bp; |
2285 | int error; | |
2286 | ||
2287 | ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); | |
2288 | ||
2289 | ddt_exit(ddt); | |
2290 | ||
294f6806 | 2291 | error = arc_read(NULL, spa, &blk, |
428870ff BB |
2292 | arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, |
2293 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, | |
2294 | &aflags, &zio->io_bookmark); | |
2295 | ||
2296 | if (error == 0) { | |
2297 | if (arc_buf_size(abuf) != zio->io_orig_size || | |
2298 | bcmp(abuf->b_data, zio->io_orig_data, | |
2299 | zio->io_orig_size) != 0) | |
2e528b49 | 2300 | error = SET_ERROR(EEXIST); |
13fe0198 | 2301 | VERIFY(arc_buf_remove_ref(abuf, &abuf)); |
428870ff BB |
2302 | } |
2303 | ||
2304 | ddt_enter(ddt); | |
2305 | return (error != 0); | |
2306 | } | |
2307 | } | |
2308 | ||
2309 | return (B_FALSE); | |
2310 | } | |
2311 | ||
2312 | static void | |
2313 | zio_ddt_child_write_ready(zio_t *zio) | |
2314 | { | |
2315 | int p = zio->io_prop.zp_copies; | |
2316 | ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); | |
2317 | ddt_entry_t *dde = zio->io_private; | |
2318 | ddt_phys_t *ddp = &dde->dde_phys[p]; | |
2319 | zio_t *pio; | |
2320 | ||
2321 | if (zio->io_error) | |
2322 | return; | |
2323 | ||
2324 | ddt_enter(ddt); | |
2325 | ||
2326 | ASSERT(dde->dde_lead_zio[p] == zio); | |
2327 | ||
2328 | ddt_phys_fill(ddp, zio->io_bp); | |
2329 | ||
2330 | while ((pio = zio_walk_parents(zio)) != NULL) | |
2331 | ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); | |
2332 | ||
2333 | ddt_exit(ddt); | |
2334 | } | |
2335 | ||
2336 | static void | |
2337 | zio_ddt_child_write_done(zio_t *zio) | |
2338 | { | |
2339 | int p = zio->io_prop.zp_copies; | |
2340 | ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); | |
2341 | ddt_entry_t *dde = zio->io_private; | |
2342 | ddt_phys_t *ddp = &dde->dde_phys[p]; | |
2343 | ||
2344 | ddt_enter(ddt); | |
2345 | ||
2346 | ASSERT(ddp->ddp_refcnt == 0); | |
2347 | ASSERT(dde->dde_lead_zio[p] == zio); | |
2348 | dde->dde_lead_zio[p] = NULL; | |
2349 | ||
2350 | if (zio->io_error == 0) { | |
2351 | while (zio_walk_parents(zio) != NULL) | |
2352 | ddt_phys_addref(ddp); | |
2353 | } else { | |
2354 | ddt_phys_clear(ddp); | |
2355 | } | |
2356 | ||
2357 | ddt_exit(ddt); | |
2358 | } | |
2359 | ||
2360 | static void | |
2361 | zio_ddt_ditto_write_done(zio_t *zio) | |
2362 | { | |
2363 | int p = DDT_PHYS_DITTO; | |
428870ff BB |
2364 | blkptr_t *bp = zio->io_bp; |
2365 | ddt_t *ddt = ddt_select(zio->io_spa, bp); | |
2366 | ddt_entry_t *dde = zio->io_private; | |
2367 | ddt_phys_t *ddp = &dde->dde_phys[p]; | |
2368 | ddt_key_t *ddk = &dde->dde_key; | |
1fde1e37 | 2369 | ASSERTV(zio_prop_t *zp = &zio->io_prop); |
428870ff BB |
2370 | |
2371 | ddt_enter(ddt); | |
2372 | ||
2373 | ASSERT(ddp->ddp_refcnt == 0); | |
2374 | ASSERT(dde->dde_lead_zio[p] == zio); | |
2375 | dde->dde_lead_zio[p] = NULL; | |
2376 | ||
2377 | if (zio->io_error == 0) { | |
2378 | ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); | |
2379 | ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); | |
2380 | ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); | |
2381 | if (ddp->ddp_phys_birth != 0) | |
2382 | ddt_phys_free(ddt, ddk, ddp, zio->io_txg); | |
2383 | ddt_phys_fill(ddp, bp); | |
2384 | } | |
2385 | ||
2386 | ddt_exit(ddt); | |
2387 | } | |
2388 | ||
2389 | static int | |
2390 | zio_ddt_write(zio_t *zio) | |
2391 | { | |
2392 | spa_t *spa = zio->io_spa; | |
2393 | blkptr_t *bp = zio->io_bp; | |
2394 | uint64_t txg = zio->io_txg; | |
2395 | zio_prop_t *zp = &zio->io_prop; | |
2396 | int p = zp->zp_copies; | |
2397 | int ditto_copies; | |
2398 | zio_t *cio = NULL; | |
2399 | zio_t *dio = NULL; | |
2400 | ddt_t *ddt = ddt_select(spa, bp); | |
2401 | ddt_entry_t *dde; | |
2402 | ddt_phys_t *ddp; | |
2403 | ||
2404 | ASSERT(BP_GET_DEDUP(bp)); | |
2405 | ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); | |
2406 | ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); | |
2407 | ||
2408 | ddt_enter(ddt); | |
2409 | dde = ddt_lookup(ddt, bp, B_TRUE); | |
2410 | ddp = &dde->dde_phys[p]; | |
2411 | ||
2412 | if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { | |
2413 | /* | |
2414 | * If we're using a weak checksum, upgrade to a strong checksum | |
2415 | * and try again. If we're already using a strong checksum, | |
2416 | * we can't resolve it, so just convert to an ordinary write. | |
2417 | * (And automatically e-mail a paper to Nature?) | |
2418 | */ | |
2419 | if (!zio_checksum_table[zp->zp_checksum].ci_dedup) { | |
2420 | zp->zp_checksum = spa_dedup_checksum(spa); | |
2421 | zio_pop_transforms(zio); | |
2422 | zio->io_stage = ZIO_STAGE_OPEN; | |
2423 | BP_ZERO(bp); | |
2424 | } else { | |
03c6040b | 2425 | zp->zp_dedup = B_FALSE; |
428870ff BB |
2426 | } |
2427 | zio->io_pipeline = ZIO_WRITE_PIPELINE; | |
2428 | ddt_exit(ddt); | |
2429 | return (ZIO_PIPELINE_CONTINUE); | |
2430 | } | |
2431 | ||
2432 | ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); | |
2433 | ASSERT(ditto_copies < SPA_DVAS_PER_BP); | |
2434 | ||
2435 | if (ditto_copies > ddt_ditto_copies_present(dde) && | |
2436 | dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { | |
2437 | zio_prop_t czp = *zp; | |
2438 | ||
2439 | czp.zp_copies = ditto_copies; | |
2440 | ||
2441 | /* | |
2442 | * If we arrived here with an override bp, we won't have run | |
2443 | * the transform stack, so we won't have the data we need to | |
2444 | * generate a child i/o. So, toss the override bp and restart. | |
2445 | * This is safe, because using the override bp is just an | |
2446 | * optimization; and it's rare, so the cost doesn't matter. | |
2447 | */ | |
2448 | if (zio->io_bp_override) { | |
2449 | zio_pop_transforms(zio); | |
2450 | zio->io_stage = ZIO_STAGE_OPEN; | |
2451 | zio->io_pipeline = ZIO_WRITE_PIPELINE; | |
2452 | zio->io_bp_override = NULL; | |
2453 | BP_ZERO(bp); | |
2454 | ddt_exit(ddt); | |
2455 | return (ZIO_PIPELINE_CONTINUE); | |
2456 | } | |
2457 | ||
2458 | dio = zio_write(zio, spa, txg, bp, zio->io_orig_data, | |
e8b96c60 | 2459 | zio->io_orig_size, &czp, NULL, NULL, |
428870ff BB |
2460 | zio_ddt_ditto_write_done, dde, zio->io_priority, |
2461 | ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); | |
2462 | ||
2463 | zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL); | |
2464 | dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; | |
2465 | } | |
2466 | ||
2467 | if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { | |
2468 | if (ddp->ddp_phys_birth != 0) | |
2469 | ddt_bp_fill(ddp, bp, txg); | |
2470 | if (dde->dde_lead_zio[p] != NULL) | |
2471 | zio_add_child(zio, dde->dde_lead_zio[p]); | |
2472 | else | |
2473 | ddt_phys_addref(ddp); | |
2474 | } else if (zio->io_bp_override) { | |
2475 | ASSERT(bp->blk_birth == txg); | |
2476 | ASSERT(BP_EQUAL(bp, zio->io_bp_override)); | |
2477 | ddt_phys_fill(ddp, bp); | |
2478 | ddt_phys_addref(ddp); | |
2479 | } else { | |
2480 | cio = zio_write(zio, spa, txg, bp, zio->io_orig_data, | |
e8b96c60 | 2481 | zio->io_orig_size, zp, zio_ddt_child_write_ready, NULL, |
428870ff BB |
2482 | zio_ddt_child_write_done, dde, zio->io_priority, |
2483 | ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); | |
2484 | ||
2485 | zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL); | |
2486 | dde->dde_lead_zio[p] = cio; | |
2487 | } | |
2488 | ||
2489 | ddt_exit(ddt); | |
2490 | ||
2491 | if (cio) | |
2492 | zio_nowait(cio); | |
2493 | if (dio) | |
2494 | zio_nowait(dio); | |
2495 | ||
2496 | return (ZIO_PIPELINE_CONTINUE); | |
2497 | } | |
2498 | ||
2499 | ddt_entry_t *freedde; /* for debugging */ | |
b128c09f | 2500 | |
428870ff BB |
2501 | static int |
2502 | zio_ddt_free(zio_t *zio) | |
2503 | { | |
2504 | spa_t *spa = zio->io_spa; | |
2505 | blkptr_t *bp = zio->io_bp; | |
2506 | ddt_t *ddt = ddt_select(spa, bp); | |
2507 | ddt_entry_t *dde; | |
2508 | ddt_phys_t *ddp; | |
2509 | ||
2510 | ASSERT(BP_GET_DEDUP(bp)); | |
2511 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
2512 | ||
2513 | ddt_enter(ddt); | |
2514 | freedde = dde = ddt_lookup(ddt, bp, B_TRUE); | |
5dc6af0e BB |
2515 | if (dde) { |
2516 | ddp = ddt_phys_select(dde, bp); | |
2517 | if (ddp) | |
2518 | ddt_phys_decref(ddp); | |
2519 | } | |
428870ff BB |
2520 | ddt_exit(ddt); |
2521 | ||
2522 | return (ZIO_PIPELINE_CONTINUE); | |
2523 | } | |
2524 | ||
2525 | /* | |
2526 | * ========================================================================== | |
2527 | * Allocate and free blocks | |
2528 | * ========================================================================== | |
2529 | */ | |
34dc7c2f BB |
2530 | static int |
2531 | zio_dva_allocate(zio_t *zio) | |
2532 | { | |
2533 | spa_t *spa = zio->io_spa; | |
428870ff | 2534 | metaslab_class_t *mc = spa_normal_class(spa); |
34dc7c2f BB |
2535 | blkptr_t *bp = zio->io_bp; |
2536 | int error; | |
6d974228 | 2537 | int flags = 0; |
34dc7c2f | 2538 | |
9babb374 BB |
2539 | if (zio->io_gang_leader == NULL) { |
2540 | ASSERT(zio->io_child_type > ZIO_CHILD_GANG); | |
2541 | zio->io_gang_leader = zio; | |
2542 | } | |
2543 | ||
34dc7c2f | 2544 | ASSERT(BP_IS_HOLE(bp)); |
c99c9001 | 2545 | ASSERT0(BP_GET_NDVAS(bp)); |
428870ff BB |
2546 | ASSERT3U(zio->io_prop.zp_copies, >, 0); |
2547 | ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); | |
34dc7c2f BB |
2548 | ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); |
2549 | ||
6d974228 GW |
2550 | /* |
2551 | * The dump device does not support gang blocks so allocation on | |
2552 | * behalf of the dump device (i.e. ZIO_FLAG_NODATA) must avoid | |
2553 | * the "fast" gang feature. | |
2554 | */ | |
2555 | flags |= (zio->io_flags & ZIO_FLAG_NODATA) ? METASLAB_GANG_AVOID : 0; | |
2556 | flags |= (zio->io_flags & ZIO_FLAG_GANG_CHILD) ? | |
2557 | METASLAB_GANG_CHILD : 0; | |
920dd524 | 2558 | flags |= (zio->io_flags & ZIO_FLAG_FASTWRITE) ? METASLAB_FASTWRITE : 0; |
b128c09f | 2559 | error = metaslab_alloc(spa, mc, zio->io_size, bp, |
6d974228 | 2560 | zio->io_prop.zp_copies, zio->io_txg, NULL, flags); |
34dc7c2f | 2561 | |
b128c09f | 2562 | if (error) { |
6d974228 GW |
2563 | spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, " |
2564 | "size %llu, error %d", spa_name(spa), zio, zio->io_size, | |
2565 | error); | |
b128c09f BB |
2566 | if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) |
2567 | return (zio_write_gang_block(zio)); | |
34dc7c2f BB |
2568 | zio->io_error = error; |
2569 | } | |
2570 | ||
2571 | return (ZIO_PIPELINE_CONTINUE); | |
2572 | } | |
2573 | ||
2574 | static int | |
2575 | zio_dva_free(zio_t *zio) | |
2576 | { | |
b128c09f | 2577 | metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); |
34dc7c2f BB |
2578 | |
2579 | return (ZIO_PIPELINE_CONTINUE); | |
2580 | } | |
2581 | ||
2582 | static int | |
2583 | zio_dva_claim(zio_t *zio) | |
2584 | { | |
b128c09f BB |
2585 | int error; |
2586 | ||
2587 | error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); | |
2588 | if (error) | |
2589 | zio->io_error = error; | |
34dc7c2f BB |
2590 | |
2591 | return (ZIO_PIPELINE_CONTINUE); | |
2592 | } | |
2593 | ||
b128c09f BB |
2594 | /* |
2595 | * Undo an allocation. This is used by zio_done() when an I/O fails | |
2596 | * and we want to give back the block we just allocated. | |
2597 | * This handles both normal blocks and gang blocks. | |
2598 | */ | |
2599 | static void | |
2600 | zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) | |
2601 | { | |
d6320ddb BB |
2602 | int g; |
2603 | ||
b128c09f | 2604 | ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); |
428870ff | 2605 | ASSERT(zio->io_bp_override == NULL); |
b128c09f BB |
2606 | |
2607 | if (!BP_IS_HOLE(bp)) | |
428870ff | 2608 | metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); |
b128c09f BB |
2609 | |
2610 | if (gn != NULL) { | |
d6320ddb | 2611 | for (g = 0; g < SPA_GBH_NBLKPTRS; g++) { |
b128c09f BB |
2612 | zio_dva_unallocate(zio, gn->gn_child[g], |
2613 | &gn->gn_gbh->zg_blkptr[g]); | |
2614 | } | |
2615 | } | |
2616 | } | |
2617 | ||
2618 | /* | |
2619 | * Try to allocate an intent log block. Return 0 on success, errno on failure. | |
2620 | */ | |
2621 | int | |
920dd524 ED |
2622 | zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, uint64_t size, |
2623 | boolean_t use_slog) | |
b128c09f | 2624 | { |
428870ff | 2625 | int error = 1; |
b128c09f | 2626 | |
428870ff BB |
2627 | ASSERT(txg > spa_syncing_txg(spa)); |
2628 | ||
ebf8e3a2 BB |
2629 | /* |
2630 | * ZIL blocks are always contiguous (i.e. not gang blocks) so we | |
2631 | * set the METASLAB_GANG_AVOID flag so that they don't "fast gang" | |
2632 | * when allocating them. | |
2633 | */ | |
2634 | if (use_slog) { | |
428870ff | 2635 | error = metaslab_alloc(spa, spa_log_class(spa), size, |
920dd524 ED |
2636 | new_bp, 1, txg, NULL, |
2637 | METASLAB_FASTWRITE | METASLAB_GANG_AVOID); | |
ebf8e3a2 | 2638 | } |
b128c09f | 2639 | |
ebf8e3a2 | 2640 | if (error) { |
428870ff | 2641 | error = metaslab_alloc(spa, spa_normal_class(spa), size, |
920dd524 | 2642 | new_bp, 1, txg, NULL, |
ac72fac3 | 2643 | METASLAB_FASTWRITE); |
ebf8e3a2 | 2644 | } |
b128c09f BB |
2645 | |
2646 | if (error == 0) { | |
2647 | BP_SET_LSIZE(new_bp, size); | |
2648 | BP_SET_PSIZE(new_bp, size); | |
2649 | BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); | |
428870ff BB |
2650 | BP_SET_CHECKSUM(new_bp, |
2651 | spa_version(spa) >= SPA_VERSION_SLIM_ZIL | |
2652 | ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); | |
b128c09f BB |
2653 | BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); |
2654 | BP_SET_LEVEL(new_bp, 0); | |
428870ff | 2655 | BP_SET_DEDUP(new_bp, 0); |
b128c09f BB |
2656 | BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); |
2657 | } | |
2658 | ||
2659 | return (error); | |
2660 | } | |
2661 | ||
2662 | /* | |
428870ff | 2663 | * Free an intent log block. |
b128c09f BB |
2664 | */ |
2665 | void | |
428870ff | 2666 | zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp) |
b128c09f | 2667 | { |
428870ff | 2668 | ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG); |
b128c09f BB |
2669 | ASSERT(!BP_IS_GANG(bp)); |
2670 | ||
428870ff | 2671 | zio_free(spa, txg, bp); |
b128c09f BB |
2672 | } |
2673 | ||
34dc7c2f BB |
2674 | /* |
2675 | * ========================================================================== | |
2676 | * Read and write to physical devices | |
2677 | * ========================================================================== | |
2678 | */ | |
98b25418 GW |
2679 | |
2680 | ||
2681 | /* | |
2682 | * Issue an I/O to the underlying vdev. Typically the issue pipeline | |
2683 | * stops after this stage and will resume upon I/O completion. | |
2684 | * However, there are instances where the vdev layer may need to | |
2685 | * continue the pipeline when an I/O was not issued. Since the I/O | |
2686 | * that was sent to the vdev layer might be different than the one | |
2687 | * currently active in the pipeline (see vdev_queue_io()), we explicitly | |
2688 | * force the underlying vdev layers to call either zio_execute() or | |
2689 | * zio_interrupt() to ensure that the pipeline continues with the correct I/O. | |
2690 | */ | |
34dc7c2f BB |
2691 | static int |
2692 | zio_vdev_io_start(zio_t *zio) | |
2693 | { | |
2694 | vdev_t *vd = zio->io_vd; | |
34dc7c2f BB |
2695 | uint64_t align; |
2696 | spa_t *spa = zio->io_spa; | |
2697 | ||
193a37cb TH |
2698 | zio->io_delay = 0; |
2699 | ||
b128c09f BB |
2700 | ASSERT(zio->io_error == 0); |
2701 | ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); | |
34dc7c2f | 2702 | |
b128c09f BB |
2703 | if (vd == NULL) { |
2704 | if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) | |
2705 | spa_config_enter(spa, SCL_ZIO, zio, RW_READER); | |
34dc7c2f | 2706 | |
b128c09f BB |
2707 | /* |
2708 | * The mirror_ops handle multiple DVAs in a single BP. | |
2709 | */ | |
98b25418 GW |
2710 | vdev_mirror_ops.vdev_op_io_start(zio); |
2711 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f BB |
2712 | } |
2713 | ||
572e2857 BB |
2714 | /* |
2715 | * We keep track of time-sensitive I/Os so that the scan thread | |
2716 | * can quickly react to certain workloads. In particular, we care | |
2717 | * about non-scrubbing, top-level reads and writes with the following | |
2718 | * characteristics: | |
98b25418 | 2719 | * - synchronous writes of user data to non-slog devices |
572e2857 BB |
2720 | * - any reads of user data |
2721 | * When these conditions are met, adjust the timestamp of spa_last_io | |
2722 | * which allows the scan thread to adjust its workload accordingly. | |
2723 | */ | |
2724 | if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL && | |
2725 | vd == vd->vdev_top && !vd->vdev_islog && | |
2726 | zio->io_bookmark.zb_objset != DMU_META_OBJSET && | |
2727 | zio->io_txg != spa_syncing_txg(spa)) { | |
2728 | uint64_t old = spa->spa_last_io; | |
2729 | uint64_t new = ddi_get_lbolt64(); | |
2730 | if (old != new) | |
2731 | (void) atomic_cas_64(&spa->spa_last_io, old, new); | |
2732 | } | |
2733 | ||
b128c09f BB |
2734 | align = 1ULL << vd->vdev_top->vdev_ashift; |
2735 | ||
b02fe35d AR |
2736 | if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) && |
2737 | P2PHASE(zio->io_size, align) != 0) { | |
2738 | /* Transform logical writes to be a full physical block size. */ | |
34dc7c2f BB |
2739 | uint64_t asize = P2ROUNDUP(zio->io_size, align); |
2740 | char *abuf = zio_buf_alloc(asize); | |
178e73b3 | 2741 | ASSERT(vd == vd->vdev_top); |
34dc7c2f BB |
2742 | if (zio->io_type == ZIO_TYPE_WRITE) { |
2743 | bcopy(zio->io_data, abuf, zio->io_size); | |
2744 | bzero(abuf + zio->io_size, asize - zio->io_size); | |
2745 | } | |
b128c09f | 2746 | zio_push_transform(zio, abuf, asize, asize, zio_subblock); |
34dc7c2f BB |
2747 | } |
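	/*
	 * Worked example (illustrative, not part of the original source): on
	 * a vdev with ashift == 12, align is 4096.  A hypothetical 1024-byte
	 * logical write is padded here to asize = P2ROUNDUP(1024, 4096) =
	 * 4096; the first 1024 bytes of abuf hold the data and the remaining
	 * 3072 are zeroed, so the device always sees full physical-sector
	 * I/O.  Reads take the same transform and zio_subblock() copies only
	 * the first 1024 bytes back on completion.
	 */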
2748 | ||
b02fe35d AR |
2749 | /* |
2750 | * If this is not a physical io, make sure that it is properly aligned | |
2751 | * before proceeding. | |
2752 | */ | |
2753 | if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) { | |
2754 | ASSERT0(P2PHASE(zio->io_offset, align)); | |
2755 | ASSERT0(P2PHASE(zio->io_size, align)); | |
2756 | } else { | |
2757 | /* | |
2758 | * For physical writes, we allow 512b aligned writes and assume | |
2759 | * the device will perform a read-modify-write as necessary. | |
2760 | */ | |
2761 | ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE)); | |
2762 | ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE)); | |
2763 | } | |
2764 | ||
572e2857 | 2765 | VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); |
fb5f0bc8 BB |
2766 | |
2767 | /* | |
2768 | * If this is a repair I/O, and there's no self-healing involved -- | |
2769 | * that is, we're just resilvering what we expect to resilver -- | |
2770 | * then don't do the I/O unless zio's txg is actually in vd's DTL. | |
2771 | * This prevents spurious resilvering with nested replication. | |
2772 | * For example, given a mirror of mirrors, (A+B)+(C+D), if only | |
2773 | * A is out of date, we'll read from C+D, then use the data to | |
2774 | * resilver A+B -- but we don't actually want to resilver B, just A. | |
2775 | * The top-level mirror has no way to know this, so instead we just | |
2776 | * discard unnecessary repairs as we work our way down the vdev tree. | |
2777 | * The same logic applies to any form of nested replication: | |
2778 | * ditto + mirror, RAID-Z + replacing, etc. This covers them all. | |
2779 | */ | |
2780 | if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && | |
2781 | !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && | |
2782 | zio->io_txg != 0 && /* not a delegated i/o */ | |
2783 | !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { | |
2784 | ASSERT(zio->io_type == ZIO_TYPE_WRITE); | |
fb5f0bc8 BB |
2785 | zio_vdev_io_bypass(zio); |
2786 | return (ZIO_PIPELINE_CONTINUE); | |
2787 | } | |
34dc7c2f | 2788 | |
b128c09f BB |
2789 | if (vd->vdev_ops->vdev_op_leaf && |
2790 | (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) { | |
2791 | ||
b0bc7a84 | 2792 | if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio)) |
d164b209 | 2793 | return (ZIO_PIPELINE_CONTINUE); |
b128c09f BB |
2794 | |
2795 | if ((zio = vdev_queue_io(zio)) == NULL) | |
2796 | return (ZIO_PIPELINE_STOP); | |
2797 | ||
2798 | if (!vdev_accessible(vd, zio)) { | |
2e528b49 | 2799 | zio->io_error = SET_ERROR(ENXIO); |
b128c09f BB |
2800 | zio_interrupt(zio); |
2801 | return (ZIO_PIPELINE_STOP); | |
2802 | } | |
b128c09f BB |
2803 | } |
2804 | ||
193a37cb | 2805 | zio->io_delay = gethrtime(); |
98b25418 GW |
2806 | vd->vdev_ops->vdev_op_io_start(zio); |
2807 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f BB |
2808 | } |
2809 | ||
2810 | static int | |
2811 | zio_vdev_io_done(zio_t *zio) | |
2812 | { | |
b128c09f BB |
2813 | vdev_t *vd = zio->io_vd; |
2814 | vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; | |
2815 | boolean_t unexpected_error = B_FALSE; | |
34dc7c2f | 2816 | |
b128c09f BB |
2817 | if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) |
2818 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f | 2819 | |
b128c09f BB |
2820 | ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE); |
2821 | ||
193a37cb TH |
2822 | if (zio->io_delay) |
2823 | zio->io_delay = gethrtime() - zio->io_delay; | |
2824 | ||
b128c09f BB |
2825 | if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { |
2826 | ||
2827 | vdev_queue_io_done(zio); | |
2828 | ||
2829 | if (zio->io_type == ZIO_TYPE_WRITE) | |
2830 | vdev_cache_write(zio); | |
2831 | ||
2832 | if (zio_injection_enabled && zio->io_error == 0) | |
9babb374 BB |
2833 | zio->io_error = zio_handle_device_injection(vd, |
2834 | zio, EIO); | |
b128c09f BB |
2835 | |
2836 | if (zio_injection_enabled && zio->io_error == 0) | |
2837 | zio->io_error = zio_handle_label_injection(zio, EIO); | |
2838 | ||
2839 | if (zio->io_error) { | |
2840 | if (!vdev_accessible(vd, zio)) { | |
2e528b49 | 2841 | zio->io_error = SET_ERROR(ENXIO); |
b128c09f BB |
2842 | } else { |
2843 | unexpected_error = B_TRUE; | |
2844 | } | |
2845 | } | |
2846 | } | |
2847 | ||
2848 | ops->vdev_op_io_done(zio); | |
34dc7c2f | 2849 | |
b128c09f | 2850 | if (unexpected_error) |
d164b209 | 2851 | VERIFY(vdev_probe(vd, zio) == NULL); |
34dc7c2f | 2852 | |
b128c09f | 2853 | return (ZIO_PIPELINE_CONTINUE); |
34dc7c2f BB |
2854 | } |
2855 | ||
428870ff BB |
2856 | /* |
2857 | * For non-raidz ZIOs, we can just copy aside the bad data read from the | |
2858 | * disk, and use that to finish the checksum ereport later. | |
2859 | */ | |
2860 | static void | |
2861 | zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, | |
2862 | const void *good_buf) | |
2863 | { | |
2864 | /* no processing needed */ | |
2865 | zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); | |
2866 | } | |
2867 | ||
2868 | /*ARGSUSED*/ | |
2869 | void | |
2870 | zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) | |
2871 | { | |
2872 | void *buf = zio_buf_alloc(zio->io_size); | |
2873 | ||
2874 | bcopy(zio->io_data, buf, zio->io_size); | |
2875 | ||
2876 | zcr->zcr_cbinfo = zio->io_size; | |
2877 | zcr->zcr_cbdata = buf; | |
2878 | zcr->zcr_finish = zio_vsd_default_cksum_finish; | |
2879 | zcr->zcr_free = zio_buf_free; | |
2880 | } | |
2881 | ||
34dc7c2f BB |
2882 | static int |
2883 | zio_vdev_io_assess(zio_t *zio) | |
2884 | { | |
2885 | vdev_t *vd = zio->io_vd; | |
b128c09f BB |
2886 | |
2887 | if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) | |
2888 | return (ZIO_PIPELINE_STOP); | |
2889 | ||
2890 | if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) | |
2891 | spa_config_exit(zio->io_spa, SCL_ZIO, zio); | |
2892 | ||
2893 | if (zio->io_vsd != NULL) { | |
428870ff | 2894 | zio->io_vsd_ops->vsd_free(zio); |
b128c09f | 2895 | zio->io_vsd = NULL; |
34dc7c2f BB |
2896 | } |
2897 | ||
b128c09f | 2898 | if (zio_injection_enabled && zio->io_error == 0) |
34dc7c2f BB |
2899 | zio->io_error = zio_handle_fault_injection(zio, EIO); |
2900 | ||
2901 | /* | |
2902 | * If the I/O failed, determine whether we should attempt to retry it. | |
428870ff BB |
2903 | * |
2904 | * On retry, we cut in line in the issue queue, since we don't want | |
2905 | * compression/checksumming/etc. work to prevent our (cheap) IO reissue. | |
34dc7c2f | 2906 | */ |
b128c09f BB |
2907 | if (zio->io_error && vd == NULL && |
2908 | !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { | |
2909 | ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ | |
2910 | ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ | |
34dc7c2f | 2911 | zio->io_error = 0; |
b128c09f BB |
2912 | zio->io_flags |= ZIO_FLAG_IO_RETRY | |
2913 | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; | |
428870ff BB |
2914 | zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; |
2915 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, | |
2916 | zio_requeue_io_start_cut_in_line); | |
b128c09f | 2917 | return (ZIO_PIPELINE_STOP); |
34dc7c2f BB |
2918 | } |
2919 | ||
b128c09f BB |
2920 | /* |
2921 | * If we got an error on a leaf device, convert it to ENXIO | |
2922 | * if the device is not accessible at all. | |
2923 | */ | |
2924 | if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && | |
2925 | !vdev_accessible(vd, zio)) | |
2e528b49 | 2926 | zio->io_error = SET_ERROR(ENXIO); |
b128c09f BB |
2927 | |
2928 | /* | |
2929 | * If we can't write to an interior vdev (mirror or RAID-Z), | |
2930 | * set vdev_cant_write so that we stop trying to allocate from it. | |
2931 | */ | |
2932 | if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && | |
13fe0198 | 2933 | vd != NULL && !vd->vdev_ops->vdev_op_leaf) { |
b128c09f | 2934 | vd->vdev_cant_write = B_TRUE; |
13fe0198 | 2935 | } |
b128c09f BB |
2936 | |
2937 | if (zio->io_error) | |
2938 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; | |
2939 | ||
e8b96c60 MA |
2940 | if (vd != NULL && vd->vdev_ops->vdev_op_leaf && |
2941 | zio->io_physdone != NULL) { | |
2942 | ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED)); | |
2943 | ASSERT(zio->io_child_type == ZIO_CHILD_VDEV); | |
2944 | zio->io_physdone(zio->io_logical); | |
2945 | } | |
2946 | ||
34dc7c2f BB |
2947 | return (ZIO_PIPELINE_CONTINUE); |
2948 | } | |
2949 | ||
2950 | void | |
2951 | zio_vdev_io_reissue(zio_t *zio) | |
2952 | { | |
2953 | ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); | |
2954 | ASSERT(zio->io_error == 0); | |
2955 | ||
428870ff | 2956 | zio->io_stage >>= 1; |
34dc7c2f BB |
2957 | } |
2958 | ||
2959 | void | |
2960 | zio_vdev_io_redone(zio_t *zio) | |
2961 | { | |
2962 | ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); | |
2963 | ||
428870ff | 2964 | zio->io_stage >>= 1; |
34dc7c2f BB |
2965 | } |
2966 | ||
2967 | void | |
2968 | zio_vdev_io_bypass(zio_t *zio) | |
2969 | { | |
2970 | ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); | |
2971 | ASSERT(zio->io_error == 0); | |
2972 | ||
2973 | zio->io_flags |= ZIO_FLAG_IO_BYPASS; | |
428870ff | 2974 | zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; |
34dc7c2f BB |
2975 | } |
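/*
 * Illustrative note, not part of the original source: pipeline stages are
 * one-hot bits, and zio_execute() advances a zio by shifting io_stage left
 * until it reaches the next bit set in io_pipeline.  Setting io_stage to a
 * stage value shifted right by one, as the reissue/redo/bypass helpers above
 * do, therefore makes that stage the next one to run; for example,
 * ZIO_STAGE_VDEV_IO_ASSESS >> 1 causes the assess stage to execute next.
 */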
2976 | ||
2977 | /* | |
2978 | * ========================================================================== | |
2979 | * Generate and verify checksums | |
2980 | * ========================================================================== | |
2981 | */ | |
2982 | static int | |
2983 | zio_checksum_generate(zio_t *zio) | |
2984 | { | |
34dc7c2f | 2985 | blkptr_t *bp = zio->io_bp; |
b128c09f | 2986 | enum zio_checksum checksum; |
34dc7c2f | 2987 | |
b128c09f BB |
2988 | if (bp == NULL) { |
2989 | /* | |
2990 | * This is zio_write_phys(). | |
2991 | * We're either generating a label checksum, or none at all. | |
2992 | */ | |
2993 | checksum = zio->io_prop.zp_checksum; | |
34dc7c2f | 2994 | |
b128c09f BB |
2995 | if (checksum == ZIO_CHECKSUM_OFF) |
2996 | return (ZIO_PIPELINE_CONTINUE); | |
2997 | ||
2998 | ASSERT(checksum == ZIO_CHECKSUM_LABEL); | |
2999 | } else { | |
3000 | if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { | |
3001 | ASSERT(!IO_IS_ALLOCATING(zio)); | |
3002 | checksum = ZIO_CHECKSUM_GANG_HEADER; | |
3003 | } else { | |
3004 | checksum = BP_GET_CHECKSUM(bp); | |
3005 | } | |
3006 | } | |
34dc7c2f | 3007 | |
b128c09f | 3008 | zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size); |
34dc7c2f BB |
3009 | |
3010 | return (ZIO_PIPELINE_CONTINUE); | |
3011 | } | |
3012 | ||
3013 | static int | |
b128c09f | 3014 | zio_checksum_verify(zio_t *zio) |
34dc7c2f | 3015 | { |
428870ff | 3016 | zio_bad_cksum_t info; |
b128c09f BB |
3017 | blkptr_t *bp = zio->io_bp; |
3018 | int error; | |
34dc7c2f | 3019 | |
428870ff BB |
3020 | ASSERT(zio->io_vd != NULL); |
3021 | ||
b128c09f BB |
3022 | if (bp == NULL) { |
3023 | /* | |
3024 | * This is zio_read_phys(). | |
3025 | * We're either verifying a label checksum, or nothing at all. | |
3026 | */ | |
3027 | if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) | |
3028 | return (ZIO_PIPELINE_CONTINUE); | |
34dc7c2f | 3029 | |
b128c09f BB |
3030 | ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL); |
3031 | } | |
34dc7c2f | 3032 | |
428870ff | 3033 | if ((error = zio_checksum_error(zio, &info)) != 0) { |
b128c09f | 3034 | zio->io_error = error; |
7a3066ff MA |
3035 | if (error == ECKSUM && |
3036 | !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { | |
428870ff BB |
3037 | zfs_ereport_start_checksum(zio->io_spa, |
3038 | zio->io_vd, zio, zio->io_offset, | |
3039 | zio->io_size, NULL, &info); | |
b128c09f | 3040 | } |
34dc7c2f BB |
3041 | } |
3042 | ||
3043 | return (ZIO_PIPELINE_CONTINUE); | |
3044 | } | |
3045 | ||
3046 | /* | |
3047 | * Called by RAID-Z to ensure we don't compute the checksum twice. | |
3048 | */ | |
3049 | void | |
3050 | zio_checksum_verified(zio_t *zio) | |
3051 | { | |
428870ff | 3052 | zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; |
34dc7c2f BB |
3053 | } |
3054 | ||
3055 | /* | |
b128c09f BB |
3056 | * ========================================================================== |
3057 | * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other. | |
9b67f605 | 3058 | * An error of 0 indicates success. ENXIO indicates whole-device failure, |
b128c09f BB |
3059 | * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO |
3060 | * indicate errors that are specific to one I/O, and most likely permanent. | |
3061 | * Any other error is presumed to be worse because we weren't expecting it. | |
3062 | * ========================================================================== | |
34dc7c2f | 3063 | */ |
b128c09f BB |
3064 | int |
3065 | zio_worst_error(int e1, int e2) | |
34dc7c2f | 3066 | { |
b128c09f BB |
3067 | static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; |
3068 | int r1, r2; | |
3069 | ||
3070 | for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) | |
3071 | if (e1 == zio_error_rank[r1]) | |
3072 | break; | |
34dc7c2f | 3073 | |
b128c09f BB |
3074 | for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) |
3075 | if (e2 == zio_error_rank[r2]) | |
3076 | break; | |
3077 | ||
3078 | return (r1 > r2 ? e1 : e2); | |
34dc7c2f BB |
3079 | } |
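#if 0
/*
 * Illustrative sketch, not part of zio.c: how zio_worst_error() might be used
 * to fold the errors reported by several children into a single result.
 * Errno values that do not appear in zio_error_rank[] above rank as the worst
 * of all, since both lookup loops run off the end of the table.
 */
static int
zio_worst_error_example(void)
{
	int err = 0;

	err = zio_worst_error(err, ENXIO);	/* ENXIO outranks 0 */
	err = zio_worst_error(err, ECKSUM);	/* ECKSUM outranks ENXIO */
	err = zio_worst_error(err, 0);		/* still ECKSUM */
	return (err);
}
#endif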
3080 | ||
3081 | /* | |
3082 | * ========================================================================== | |
b128c09f | 3083 | * I/O completion |
34dc7c2f BB |
3084 | * ========================================================================== |
3085 | */ | |
b128c09f BB |
3086 | static int |
3087 | zio_ready(zio_t *zio) | |
34dc7c2f | 3088 | { |
b128c09f | 3089 | blkptr_t *bp = zio->io_bp; |
d164b209 | 3090 | zio_t *pio, *pio_next; |
34dc7c2f | 3091 | |
428870ff BB |
3092 | if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || |
3093 | zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY)) | |
9babb374 | 3094 | return (ZIO_PIPELINE_STOP); |
34dc7c2f | 3095 | |
9babb374 | 3096 | if (zio->io_ready) { |
b128c09f | 3097 | ASSERT(IO_IS_ALLOCATING(zio)); |
03c6040b GW |
3098 | ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) || |
3099 | (zio->io_flags & ZIO_FLAG_NOPWRITE)); | |
b128c09f | 3100 | ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); |
34dc7c2f | 3101 | |
b128c09f BB |
3102 | zio->io_ready(zio); |
3103 | } | |
34dc7c2f | 3104 | |
b128c09f BB |
3105 | if (bp != NULL && bp != &zio->io_bp_copy) |
3106 | zio->io_bp_copy = *bp; | |
34dc7c2f | 3107 | |
b128c09f BB |
3108 | if (zio->io_error) |
3109 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; | |
34dc7c2f | 3110 | |
d164b209 BB |
3111 | mutex_enter(&zio->io_lock); |
3112 | zio->io_state[ZIO_WAIT_READY] = 1; | |
3113 | pio = zio_walk_parents(zio); | |
3114 | mutex_exit(&zio->io_lock); | |
3115 | ||
3116 | /* | |
3117 | * As we notify zio's parents, new parents could be added. | |
3118 | * New parents go to the head of zio's io_parent_list, however, | |
3119 | * so we will (correctly) not notify them. The remainder of zio's | |
3120 | * io_parent_list, from 'pio_next' onward, cannot change because | |
3121 | * all parents must wait for us to be done before they can be done. | |
3122 | */ | |
3123 | for (; pio != NULL; pio = pio_next) { | |
3124 | pio_next = zio_walk_parents(zio); | |
b128c09f | 3125 | zio_notify_parent(pio, zio, ZIO_WAIT_READY); |
d164b209 | 3126 | } |
34dc7c2f | 3127 | |
428870ff BB |
3128 | if (zio->io_flags & ZIO_FLAG_NODATA) { |
3129 | if (BP_IS_GANG(bp)) { | |
3130 | zio->io_flags &= ~ZIO_FLAG_NODATA; | |
3131 | } else { | |
3132 | ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE); | |
3133 | zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; | |
3134 | } | |
3135 | } | |
3136 | ||
3137 | if (zio_injection_enabled && | |
3138 | zio->io_spa->spa_syncing_txg == zio->io_txg) | |
3139 | zio_handle_ignored_writes(zio); | |
3140 | ||
b128c09f | 3141 | return (ZIO_PIPELINE_CONTINUE); |
34dc7c2f BB |
3142 | } |
3143 | ||
b128c09f BB |
3144 | static int |
3145 | zio_done(zio_t *zio) | |
34dc7c2f | 3146 | { |
d164b209 | 3147 | zio_t *pio, *pio_next; |
d6320ddb | 3148 | int c, w; |
34dc7c2f | 3149 | |
b128c09f | 3150 | /* |
9babb374 | 3151 | * If our children haven't all completed, |
b128c09f BB |
3152 | * wait for them and then repeat this pipeline stage. |
3153 | */ | |
3154 | if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) || | |
3155 | zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) || | |
428870ff | 3156 | zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) || |
b128c09f BB |
3157 | zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE)) |
3158 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f | 3159 | |
d6320ddb BB |
3160 | for (c = 0; c < ZIO_CHILD_TYPES; c++) |
3161 | for (w = 0; w < ZIO_WAIT_TYPES; w++) | |
b128c09f BB |
3162 | ASSERT(zio->io_children[c][w] == 0); |
3163 | ||
9b67f605 | 3164 | if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) { |
c776b317 BB |
3165 | ASSERT(zio->io_bp->blk_pad[0] == 0); |
3166 | ASSERT(zio->io_bp->blk_pad[1] == 0); | |
d1d7e268 MK |
3167 | ASSERT(bcmp(zio->io_bp, &zio->io_bp_copy, |
3168 | sizeof (blkptr_t)) == 0 || | |
c776b317 BB |
3169 | (zio->io_bp == zio_unique_parent(zio)->io_bp)); |
3170 | if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) && | |
428870ff | 3171 | zio->io_bp_override == NULL && |
b128c09f | 3172 | !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { |
c776b317 | 3173 | ASSERT(!BP_SHOULD_BYTESWAP(zio->io_bp)); |
d1d7e268 MK |
3174 | ASSERT3U(zio->io_prop.zp_copies, <=, |
3175 | BP_GET_NDVAS(zio->io_bp)); | |
c776b317 | 3176 | ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 || |
d1d7e268 MK |
3177 | (BP_COUNT_GANG(zio->io_bp) == |
3178 | BP_GET_NDVAS(zio->io_bp))); | |
b128c09f | 3179 | } |
03c6040b GW |
3180 | if (zio->io_flags & ZIO_FLAG_NOPWRITE) |
3181 | VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig)); | |
b128c09f BB |
3182 | } |
3183 | ||
3184 | /* | |
428870ff | 3185 | * If there were child vdev/gang/ddt errors, they apply to us now. |
b128c09f BB |
3186 | */ |
3187 | zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); | |
3188 | zio_inherit_child_errors(zio, ZIO_CHILD_GANG); | |
428870ff BB |
3189 | zio_inherit_child_errors(zio, ZIO_CHILD_DDT); |
3190 | ||
3191 | /* | |
3192 | * If the I/O on the transformed data was successful, generate any | |
3193 | * checksum reports now while we still have the transformed data. | |
3194 | */ | |
3195 | if (zio->io_error == 0) { | |
3196 | while (zio->io_cksum_report != NULL) { | |
3197 | zio_cksum_report_t *zcr = zio->io_cksum_report; | |
3198 | uint64_t align = zcr->zcr_align; | |
c776b317 | 3199 | uint64_t asize = P2ROUNDUP(zio->io_size, align); |
428870ff BB |
3200 | char *abuf = zio->io_data; |
3201 | ||
c776b317 | 3202 | if (asize != zio->io_size) { |
428870ff | 3203 | abuf = zio_buf_alloc(asize); |
c776b317 | 3204 | bcopy(zio->io_data, abuf, zio->io_size); |
d1d7e268 | 3205 | bzero(abuf+zio->io_size, asize-zio->io_size); |
428870ff BB |
3206 | } |
3207 | ||
3208 | zio->io_cksum_report = zcr->zcr_next; | |
3209 | zcr->zcr_next = NULL; | |
3210 | zcr->zcr_finish(zcr, abuf); | |
3211 | zfs_ereport_free_checksum(zcr); | |
3212 | ||
c776b317 | 3213 | if (asize != zio->io_size) |
428870ff BB |
3214 | zio_buf_free(abuf, asize); |
3215 | } | |
3216 | } | |
b128c09f BB |
3217 | |
3218 | zio_pop_transforms(zio); /* note: may set zio->io_error */ | |
3219 | ||
c776b317 | 3220 | vdev_stat_update(zio, zio->io_size); |
b128c09f | 3221 | |
a69052be | 3222 | /* |
cc92e9d0 | 3223 | * If this I/O is attached to a particular vdev and is slow, exceeding |
72f53c56 MJ |
3224 | * zio_delay_max milliseconds (30 seconds by default) to complete, post an error describing the I/O delay. |
3225 | * We ignore these errors if the device is currently unavailable. | |
a69052be | 3226 | */ |
193a37cb | 3227 | if (zio->io_delay >= MSEC2NSEC(zio_delay_max)) { |
72f53c56 MJ |
3228 | if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) |
3229 | zfs_ereport_post(FM_EREPORT_ZFS_DELAY, zio->io_spa, | |
d1d7e268 | 3230 | zio->io_vd, zio, 0, 0); |
72f53c56 | 3231 | } |
a69052be | 3232 | |
b128c09f BB |
3233 | if (zio->io_error) { |
3234 | /* | |
3235 | * If this I/O is attached to a particular vdev, | |
3236 | * generate an error message describing the I/O failure | |
3237 | * at the block level. We ignore these errors if the | |
3238 | * device is currently unavailable. | |
3239 | */ | |
c776b317 BB |
3240 | if (zio->io_error != ECKSUM && zio->io_vd != NULL && |
3241 | !vdev_is_dead(zio->io_vd)) | |
3242 | zfs_ereport_post(FM_EREPORT_ZFS_IO, zio->io_spa, | |
3243 | zio->io_vd, zio, 0, 0); | |
34dc7c2f | 3244 | |
428870ff BB |
3245 | if ((zio->io_error == EIO || !(zio->io_flags & |
3246 | (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && | |
c776b317 | 3247 | zio == zio->io_logical) { |
b128c09f BB |
3248 | /* |
3249 | * For logical I/O requests, tell the SPA to log the | |
3250 | * error and generate a logical data ereport. | |
3251 | */ | |
c776b317 | 3252 | spa_log_error(zio->io_spa, zio); |
d1d7e268 MK |
3253 | zfs_ereport_post(FM_EREPORT_ZFS_DATA, zio->io_spa, |
3254 | NULL, zio, 0, 0); | |
b128c09f BB |
3255 | } |
3256 | } | |
34dc7c2f | 3257 | |
c776b317 | 3258 | if (zio->io_error && zio == zio->io_logical) { |
b128c09f BB |
3259 | /* |
3260 | * Determine whether zio should be reexecuted. This will | |
3261 | * propagate all the way to the root via zio_notify_parent(). | |
3262 | */ | |
c776b317 | 3263 | ASSERT(zio->io_vd == NULL && zio->io_bp != NULL); |
428870ff | 3264 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); |
b128c09f | 3265 | |
428870ff BB |
3266 | if (IO_IS_ALLOCATING(zio) && |
3267 | !(zio->io_flags & ZIO_FLAG_CANFAIL)) { | |
b128c09f BB |
3268 | if (zio->io_error != ENOSPC) |
3269 | zio->io_reexecute |= ZIO_REEXECUTE_NOW; | |
3270 | else | |
3271 | zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; | |
428870ff | 3272 | } |
b128c09f BB |
3273 | |
3274 | if ((zio->io_type == ZIO_TYPE_READ || | |
3275 | zio->io_type == ZIO_TYPE_FREE) && | |
572e2857 | 3276 | !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && |
b128c09f | 3277 | zio->io_error == ENXIO && |
c776b317 BB |
3278 | spa_load_state(zio->io_spa) == SPA_LOAD_NONE && |
3279 | spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE) | |
b128c09f BB |
3280 | zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; |
3281 | ||
3282 | if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) | |
3283 | zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; | |
428870ff BB |
3284 | |
3285 | /* | |
3286 | * Here is a possibly good place to attempt to do | |
3287 | * either combinatorial reconstruction or error correction | |
3288 | * based on checksums. It also might be a good place | |
3289 | * to send out preliminary ereports before we suspend | |
3290 | * processing. | |
3291 | */ | |
34dc7c2f BB |
3292 | } |
3293 | ||
3294 | /* | |
b128c09f BB |
3295 | * If there were logical child errors, they apply to us now. |
3296 | * We defer this until now to avoid conflating logical child | |
3297 | * errors with errors that happened to the zio itself when | |
3298 | * updating vdev stats and reporting FMA events above. | |
34dc7c2f | 3299 | */ |
b128c09f | 3300 | zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); |
34dc7c2f | 3301 | |
428870ff BB |
3302 | if ((zio->io_error || zio->io_reexecute) && |
3303 | IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && | |
03c6040b | 3304 | !(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE))) |
c776b317 | 3305 | zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp); |
9babb374 BB |
3306 | |
3307 | zio_gang_tree_free(&zio->io_gang_tree); | |
3308 | ||
3309 | /* | |
3310 | * Godfather I/Os should never suspend. | |
3311 | */ | |
3312 | if ((zio->io_flags & ZIO_FLAG_GODFATHER) && | |
3313 | (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) | |
3314 | zio->io_reexecute = 0; | |
3315 | ||
b128c09f BB |
3316 | if (zio->io_reexecute) { |
3317 | /* | |
3318 | * This is a logical I/O that wants to reexecute. | |
3319 | * | |
3320 | * Reexecute is top-down. When an i/o fails, if it's not | |
3321 | * the root, it simply notifies its parent and sticks around. | |
3322 | * The parent, seeing that it still has children in zio_done(), | |
3323 | * does the same. This percolates all the way up to the root. | |
3324 | * The root i/o will reexecute or suspend the entire tree. | |
3325 | * | |
3326 | * This approach ensures that zio_reexecute() honors | |
3327 | * all the original i/o dependency relationships, e.g. | |
3328 | * parents not executing until children are ready. | |
3329 | */ | |
3330 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
34dc7c2f | 3331 | |
9babb374 | 3332 | zio->io_gang_leader = NULL; |
b128c09f | 3333 | |
d164b209 BB |
3334 | mutex_enter(&zio->io_lock); |
3335 | zio->io_state[ZIO_WAIT_DONE] = 1; | |
3336 | mutex_exit(&zio->io_lock); | |
3337 | ||
9babb374 BB |
3338 | /* |
3339 | * "The Godfather" I/O monitors its children but is | |
3340 | * not a true parent to them. It will track them through | |
3341 | * the pipeline but severs its ties whenever they get into | |
3342 | * trouble (e.g. suspended). This allows "The Godfather" | |
3343 | * I/O to return status without blocking. | |
3344 | */ | |
3345 | for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { | |
3346 | zio_link_t *zl = zio->io_walk_link; | |
3347 | pio_next = zio_walk_parents(zio); | |
3348 | ||
3349 | if ((pio->io_flags & ZIO_FLAG_GODFATHER) && | |
3350 | (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { | |
3351 | zio_remove_child(pio, zio, zl); | |
3352 | zio_notify_parent(pio, zio, ZIO_WAIT_DONE); | |
3353 | } | |
3354 | } | |
3355 | ||
d164b209 | 3356 | if ((pio = zio_unique_parent(zio)) != NULL) { |
b128c09f BB |
3357 | /* |
3358 | * We're not a root i/o, so there's nothing to do | |
3359 | * but notify our parent. Don't propagate errors | |
3360 | * upward since we haven't permanently failed yet. | |
3361 | */ | |
9babb374 | 3362 | ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); |
b128c09f BB |
3363 | zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; |
3364 | zio_notify_parent(pio, zio, ZIO_WAIT_DONE); | |
3365 | } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { | |
3366 | /* | |
3367 | * We'd fail again if we reexecuted now, so suspend | |
3368 | * until conditions improve (e.g. device comes online). | |
3369 | */ | |
c776b317 | 3370 | zio_suspend(zio->io_spa, zio); |
b128c09f BB |
3371 | } else { |
3372 | /* | |
3373 | * Reexecution is potentially a huge amount of work. | |
3374 | * Hand it off to the otherwise-unused claim taskq. | |
3375 | */ | |
a38718a6 | 3376 | ASSERT(taskq_empty_ent(&zio->io_tqent)); |
7ef5e54e AL |
3377 | spa_taskq_dispatch_ent(zio->io_spa, |
3378 | ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE, | |
a38718a6 GA |
3379 | (task_func_t *)zio_reexecute, zio, 0, |
3380 | &zio->io_tqent); | |
b128c09f BB |
3381 | } |
3382 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f BB |
3383 | } |
3384 | ||
428870ff | 3385 | ASSERT(zio->io_child_count == 0); |
b128c09f BB |
3386 | ASSERT(zio->io_reexecute == 0); |
3387 | ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); | |
34dc7c2f | 3388 | |
428870ff BB |
3389 | /* |
3390 | * Report any checksum errors, since the I/O is complete. | |
3391 | */ | |
3392 | while (zio->io_cksum_report != NULL) { | |
3393 | zio_cksum_report_t *zcr = zio->io_cksum_report; | |
3394 | zio->io_cksum_report = zcr->zcr_next; | |
3395 | zcr->zcr_next = NULL; | |
3396 | zcr->zcr_finish(zcr, NULL); | |
3397 | zfs_ereport_free_checksum(zcr); | |
3398 | } | |
3399 | ||
920dd524 | 3400 | if (zio->io_flags & ZIO_FLAG_FASTWRITE && zio->io_bp && |
9b67f605 MA |
3401 | !BP_IS_HOLE(zio->io_bp) && !BP_IS_EMBEDDED(zio->io_bp) && |
3402 | !(zio->io_flags & ZIO_FLAG_NOPWRITE)) { | |
920dd524 ED |
3403 | metaslab_fastwrite_unmark(zio->io_spa, zio->io_bp); |
3404 | } | |
3405 | ||
d164b209 BB |
3406 | /* |
3407 | * It is the responsibility of the done callback to ensure that this | |
3408 | * particular zio is no longer discoverable for adoption, and as | |
3409 | * such, cannot acquire any new parents. | |
3410 | */ | |
b128c09f BB |
3411 | if (zio->io_done) |
3412 | zio->io_done(zio); | |
34dc7c2f | 3413 | |
d164b209 BB |
3414 | mutex_enter(&zio->io_lock); |
3415 | zio->io_state[ZIO_WAIT_DONE] = 1; | |
3416 | mutex_exit(&zio->io_lock); | |
34dc7c2f | 3417 | |
d164b209 BB |
3418 | for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { |
3419 | zio_link_t *zl = zio->io_walk_link; | |
3420 | pio_next = zio_walk_parents(zio); | |
3421 | zio_remove_child(pio, zio, zl); | |
b128c09f BB |
3422 | zio_notify_parent(pio, zio, ZIO_WAIT_DONE); |
3423 | } | |
34dc7c2f | 3424 | |
b128c09f BB |
3425 | if (zio->io_waiter != NULL) { |
3426 | mutex_enter(&zio->io_lock); | |
3427 | zio->io_executor = NULL; | |
3428 | cv_broadcast(&zio->io_cv); | |
3429 | mutex_exit(&zio->io_lock); | |
3430 | } else { | |
3431 | zio_destroy(zio); | |
3432 | } | |
34dc7c2f | 3433 | |
b128c09f | 3434 | return (ZIO_PIPELINE_STOP); |
34dc7c2f BB |
3435 | } |
3436 | ||
3437 | /* | |
b128c09f BB |
3438 | * ========================================================================== |
3439 | * I/O pipeline definition | |
3440 | * ========================================================================== | |
34dc7c2f | 3441 | */ |
428870ff | 3442 | static zio_pipe_stage_t *zio_pipeline[] = { |
b128c09f | 3443 | NULL, |
b128c09f | 3444 | zio_read_bp_init, |
428870ff BB |
3445 | zio_free_bp_init, |
3446 | zio_issue_async, | |
b128c09f BB |
3447 | zio_write_bp_init, |
3448 | zio_checksum_generate, | |
03c6040b | 3449 | zio_nop_write, |
428870ff BB |
3450 | zio_ddt_read_start, |
3451 | zio_ddt_read_done, | |
3452 | zio_ddt_write, | |
3453 | zio_ddt_free, | |
b128c09f BB |
3454 | zio_gang_assemble, |
3455 | zio_gang_issue, | |
3456 | zio_dva_allocate, | |
3457 | zio_dva_free, | |
3458 | zio_dva_claim, | |
3459 | zio_ready, | |
3460 | zio_vdev_io_start, | |
3461 | zio_vdev_io_done, | |
3462 | zio_vdev_io_assess, | |
3463 | zio_checksum_verify, | |
3464 | zio_done | |
3465 | }; | |
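/*
 * Illustrative note, not part of the original source: this table is believed
 * to be indexed by pipeline stage, in the same order as the ZIO_STAGE_* bits,
 * with zio_execute() dispatching to the entry for the stage it is about to
 * run.  The leading NULL slot corresponds to the open stage, which is
 * completed when the zio is created and therefore never dispatched here.
 */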
c28b2279 | 3466 | |
9ae529ec | 3467 | |
9ae529ec | 3468 | |
9ae529ec | 3469 | |
fcff0f35 PD |
3470 | /* |
3471 | * Compare two zbookmark_phys_t's to see which we would reach first in a | |
3472 | * pre-order traversal of the object tree. | |
3473 | * | |
3474 | * This is simple in every case aside from the meta-dnode object. For all other | |
3475 | * objects, we traverse them in order (object 1 before object 2, and so on). | |
3476 | * However, all of these objects are traversed while traversing object 0, since | |
3477 | * the data it points to is the list of objects. Thus, we need to convert to a | |
3478 | * canonical representation so we can compare meta-dnode bookmarks to | |
3479 | * non-meta-dnode bookmarks. | |
3480 | * | |
3481 | * We do this by calculating "equivalents" for each field of the zbookmark. | |
3482 | * zbookmarks outside of the meta-dnode use their own object and level, and | |
3483 | * calculate the level 0 equivalent (the first L0 blkid that is contained in the | |
3484 | * blocks this bookmark refers to) by multiplying their blkid by their span | |
3485 | * (the number of L0 blocks contained within one block at their level). | |
3486 | * zbookmarks inside the meta-dnode calculate their object equivalent | |
3487 | * (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use | |
3488 | * level + 1<<31 (any value larger than a level could ever be) for their level. | |
3489 | * This causes them to always compare before a bookmark in their object | |
3490 | * equivalent, compare appropriately to bookmarks in other objects, and to | |
3491 | * compare appropriately to other bookmarks in the meta-dnode. | |
3492 | */ | |
3493 | int | |
3494 | zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2, | |
3495 | const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2) | |
3496 | { | |
3497 | /* | |
3498 | * These variables represent the "equivalent" values for the zbookmark, | |
3499 | * after converting zbookmarks inside the meta dnode to their | |
3500 | * normal-object equivalents. | |
3501 | */ | |
3502 | uint64_t zb1obj, zb2obj; | |
3503 | uint64_t zb1L0, zb2L0; | |
3504 | uint64_t zb1level, zb2level; | |
3505 | ||
3506 | if (zb1->zb_object == zb2->zb_object && | |
3507 | zb1->zb_level == zb2->zb_level && | |
3508 | zb1->zb_blkid == zb2->zb_blkid) | |
3509 | return (0); | |
9ae529ec | 3510 | |
fcff0f35 PD |
3511 | /* |
3512 | * BP_SPANB calculates the span in blocks. | |
3513 | */ | |
3514 | zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level); | |
3515 | zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level); | |
9ae529ec CS |
3516 | |
3517 | if (zb1->zb_object == DMU_META_DNODE_OBJECT) { | |
fcff0f35 PD |
3518 | zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); |
3519 | zb1L0 = 0; | |
3520 | zb1level = zb1->zb_level + COMPARE_META_LEVEL; | |
3521 | } else { | |
3522 | zb1obj = zb1->zb_object; | |
3523 | zb1level = zb1->zb_level; | |
9ae529ec CS |
3524 | } |
3525 | ||
fcff0f35 PD |
3526 | if (zb2->zb_object == DMU_META_DNODE_OBJECT) { |
3527 | zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT)); | |
3528 | zb2L0 = 0; | |
3529 | zb2level = zb2->zb_level + COMPARE_META_LEVEL; | |
3530 | } else { | |
3531 | zb2obj = zb2->zb_object; | |
3532 | zb2level = zb2->zb_level; | |
3533 | } | |
3534 | ||
3535 | /* Now that we have a canonical representation, do the comparison. */ | |
3536 | if (zb1obj != zb2obj) | |
3537 | return (zb1obj < zb2obj ? -1 : 1); | |
3538 | else if (zb1L0 != zb2L0) | |
3539 | return (zb1L0 < zb2L0 ? -1 : 1); | |
3540 | else if (zb1level != zb2level) | |
3541 | return (zb1level > zb2level ? -1 : 1); | |
3542 | /* | |
3543 | * This can (theoretically) happen if the bookmarks have the same object | |
3544 | * and level but different blkids, which can only occur if the block sizes differ. | |
3545 | * There is presently no way to change the indirect block sizes, so this case should not arise in practice. | |
3546 | */ | |
3547 | return (0); | |
3548 | } | |
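/*
 * Worked example, illustrative only (not in the original source): assume the
 * meta-dnode uses 16K data blocks, i.e. dbss = 32 sectors.  With 512-byte
 * dnodes (1 << DNODE_SHIFT), each L0 meta-dnode block holds 32 dnodes, so a
 * level-0 meta-dnode bookmark with blkid 3 covers objects 96-127.  It
 * canonicalizes to zb1obj = 3 * 32 = 96, zb1L0 = 0, and
 * zb1level = 0 + COMPARE_META_LEVEL, so it sorts after every bookmark in
 * objects 0-95 and before every bookmark inside objects 96-127, matching the
 * pre-order traversal described above.
 */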
3549 | ||
3550 | /* | |
3551 | * This function checks the following: given that last_block is the place that | |
3552 | * our traversal stopped last time, does that guarantee that we've visited | |
3553 | * every node under subtree_root? Therefore, we can't just use the raw output | |
3554 | * of zbookmark_compare. We have to pass in a modified version of | |
3555 | * subtree_root; by incrementing the block id, and then checking whether | |
3556 | * last_block is before or equal to that, we can tell whether or not having | |
3557 | * visited last_block implies that all of subtree_root's children have been | |
3558 | * visited. | |
3559 | */ | |
3560 | boolean_t | |
3561 | zbookmark_subtree_completed(const dnode_phys_t *dnp, | |
3562 | const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block) | |
3563 | { | |
3564 | zbookmark_phys_t mod_zb = *subtree_root; | |
3565 | mod_zb.zb_blkid++; | |
3566 | ASSERT(last_block->zb_level == 0); | |
3567 | ||
3568 | /* The objset_phys_t isn't before anything. */ | |
3569 | if (dnp == NULL) | |
9ae529ec | 3570 | return (B_FALSE); |
fcff0f35 PD |
3571 | |
3572 | /* | |
3573 | * We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the | |
3574 | * data block size in sectors, because that variable is only used if | |
3575 | * the bookmark refers to a block in the meta-dnode. Since we don't | |
3576 | * know without examining it what object it refers to, and there's no | |
3577 | * harm in passing in this value in other cases, we always pass it in. | |
3578 | * | |
3579 | * We pass in 0 for the indirect block size shift because zb2 must be | |
3580 | * level 0. The indirect block size is only used to calculate the span | |
3581 | * of the bookmark, but since the bookmark must be level 0, the span is | |
3582 | * always 1, so the math works out. | |
3583 | * | |
3584 | * If you make changes to how the zbookmark_compare code works, be sure | |
3585 | * to make sure that this code still works afterwards. | |
3586 | */ | |
3587 | return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift, | |
3588 | 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb, | |
3589 | last_block) <= 0); | |
9ae529ec CS |
3590 | } |
3591 | ||
c28b2279 | 3592 | #if defined(_KERNEL) && defined(HAVE_SPL) |
c28b2279 | 3593 | EXPORT_SYMBOL(zio_type_name); |
81971b13 BB |
3594 | EXPORT_SYMBOL(zio_buf_alloc); |
3595 | EXPORT_SYMBOL(zio_data_buf_alloc); | |
6fe53787 | 3596 | EXPORT_SYMBOL(zio_buf_alloc_flags); |
81971b13 BB |
3597 | EXPORT_SYMBOL(zio_buf_free); |
3598 | EXPORT_SYMBOL(zio_data_buf_free); | |
c28b2279 | 3599 | |
a69052be | 3600 | module_param(zio_delay_max, int, 0644); |
c409e464 BB |
3601 | MODULE_PARM_DESC(zio_delay_max, "Max zio millisec delay before posting event"); |
3602 | ||
3603 | module_param(zio_requeue_io_start_cut_in_line, int, 0644); | |
3604 | MODULE_PARM_DESC(zio_requeue_io_start_cut_in_line, "Prioritize requeued I/O"); | |
29dee3ee CP |
3605 | |
3606 | module_param(zfs_sync_pass_deferred_free, int, 0644); | |
3607 | MODULE_PARM_DESC(zfs_sync_pass_deferred_free, | |
d1d7e268 | 3608 | "Defer frees starting in this pass"); |
29dee3ee CP |
3609 | |
3610 | module_param(zfs_sync_pass_dont_compress, int, 0644); | |
3611 | MODULE_PARM_DESC(zfs_sync_pass_dont_compress, | |
d1d7e268 | 3612 | "Don't compress starting in this pass"); |
29dee3ee CP |
3613 | |
3614 | module_param(zfs_sync_pass_rewrite, int, 0644); | |
3615 | MODULE_PARM_DESC(zfs_sync_pass_rewrite, | |
d1d7e268 | 3616 | "Rewrite new bps starting in this pass"); |
c28b2279 | 3617 | #endif |