Commit | Line | Data |
---|---|---|
34dc7c2f BB |
1 | /* |
2 | * CDDL HEADER START | |
3 | * | |
4 | * The contents of this file are subject to the terms of the | |
5 | * Common Development and Distribution License (the "License"). | |
6 | * You may not use this file except in compliance with the License. | |
7 | * | |
8 | * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE | |
9 | * or http://www.opensolaris.org/os/licensing. | |
10 | * See the License for the specific language governing permissions | |
11 | * and limitations under the License. | |
12 | * | |
13 | * When distributing Covered Code, include this CDDL HEADER in each | |
14 | * file and include the License file at usr/src/OPENSOLARIS.LICENSE. | |
15 | * If applicable, add the following below this CDDL HEADER, with the | |
16 | * fields enclosed by brackets "[]" replaced with your own identifying | |
17 | * information: Portions Copyright [yyyy] [name of copyright owner] | |
18 | * | |
19 | * CDDL HEADER END | |
20 | */ | |
21 | /* | |
428870ff | 22 | * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. |
ebf8e3a2 | 23 | * Copyright (c) 2012 by Delphix. All rights reserved. |
a38718a6 | 24 | * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved. |
34dc7c2f BB |
25 | */ |
26 | ||
34dc7c2f BB |
27 | #include <sys/zfs_context.h> |
28 | #include <sys/fm/fs/zfs.h> | |
29 | #include <sys/spa.h> | |
30 | #include <sys/txg.h> | |
31 | #include <sys/spa_impl.h> | |
32 | #include <sys/vdev_impl.h> | |
33 | #include <sys/zio_impl.h> | |
34 | #include <sys/zio_compress.h> | |
35 | #include <sys/zio_checksum.h> | |
428870ff BB |
36 | #include <sys/dmu_objset.h> |
37 | #include <sys/arc.h> | |
38 | #include <sys/ddt.h> | |
34dc7c2f BB |
39 | |
40 | /* | |
41 | * ========================================================================== | |
42 | * I/O priority table | |
43 | * ========================================================================== | |
44 | */ | |
45 | uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = { | |
46 | 0, /* ZIO_PRIORITY_NOW */ | |
47 | 0, /* ZIO_PRIORITY_SYNC_READ */ | |
48 | 0, /* ZIO_PRIORITY_SYNC_WRITE */ | |
34dc7c2f | 49 | 0, /* ZIO_PRIORITY_LOG_WRITE */ |
428870ff BB |
50 | 1, /* ZIO_PRIORITY_CACHE_FILL */ |
51 | 1, /* ZIO_PRIORITY_AGG */ | |
52 | 4, /* ZIO_PRIORITY_FREE */ | |
53 | 4, /* ZIO_PRIORITY_ASYNC_WRITE */ | |
54 | 6, /* ZIO_PRIORITY_ASYNC_READ */ | |
34dc7c2f BB |
55 | 10, /* ZIO_PRIORITY_RESILVER */ |
56 | 20, /* ZIO_PRIORITY_SCRUB */ | |
428870ff | 57 | 2, /* ZIO_PRIORITY_DDT_PREFETCH */ |
34dc7c2f BB |
58 | }; |
59 | ||
60 | /* | |
61 | * ========================================================================== | |
62 | * I/O type descriptions | |
63 | * ========================================================================== | |
64 | */ | |
65 | char *zio_type_name[ZIO_TYPES] = { | |
451041db | 66 | "z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl" |
428870ff | 67 | }; |
34dc7c2f BB |
68 | |
69 | /* | |
70 | * ========================================================================== | |
71 | * I/O kmem caches | |
72 | * ========================================================================== | |
73 | */ | |
74 | kmem_cache_t *zio_cache; | |
d164b209 | 75 | kmem_cache_t *zio_link_cache; |
86dd0fd9 | 76 | kmem_cache_t *zio_vdev_cache; |
34dc7c2f BB |
77 | kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; |
78 | kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT]; | |
c28b2279 | 79 | int zio_bulk_flags = 0; |
a69052be | 80 | int zio_delay_max = ZIO_DELAY_MAX; |
34dc7c2f BB |
81 | |
82 | #ifdef _KERNEL | |
83 | extern vmem_t *zio_alloc_arena; | |
84 | #endif | |
6d974228 | 85 | extern int zfs_mg_alloc_failures; |
34dc7c2f BB |
86 | |
87 | /* | |
b128c09f BB |
88 | * An allocating zio is one that either currently has the DVA allocate |
89 | * stage set or will have it later in its lifetime. | |
34dc7c2f | 90 | */ |
428870ff BB |
91 | #define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE) |
92 | ||
c409e464 | 93 | int zio_requeue_io_start_cut_in_line = 1; |
428870ff BB |
94 | |
95 | #ifdef ZFS_DEBUG | |
96 | int zio_buf_debug_limit = 16384; | |
97 | #else | |
98 | int zio_buf_debug_limit = 0; | |
99 | #endif | |
34dc7c2f | 100 | |
da6b4005 NB |
101 | static inline void __zio_execute(zio_t *zio); |
102 | ||
49be0ccf BB |
103 | static int |
104 | zio_cons(void *arg, void *unused, int kmflag) | |
105 | { | |
106 | zio_t *zio = arg; | |
107 | ||
108 | bzero(zio, sizeof (zio_t)); | |
109 | ||
110 | mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL); | |
111 | cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL); | |
112 | ||
113 | list_create(&zio->io_parent_list, sizeof (zio_link_t), | |
114 | offsetof(zio_link_t, zl_parent_node)); | |
115 | list_create(&zio->io_child_list, sizeof (zio_link_t), | |
116 | offsetof(zio_link_t, zl_child_node)); | |
117 | ||
118 | return (0); | |
119 | } | |
120 | ||
121 | static void | |
122 | zio_dest(void *arg, void *unused) | |
123 | { | |
124 | zio_t *zio = arg; | |
125 | ||
126 | mutex_destroy(&zio->io_lock); | |
127 | cv_destroy(&zio->io_cv); | |
128 | list_destroy(&zio->io_parent_list); | |
129 | list_destroy(&zio->io_child_list); | |
130 | } | |
131 | ||
34dc7c2f BB |
132 | void |
133 | zio_init(void) | |
134 | { | |
135 | size_t c; | |
136 | vmem_t *data_alloc_arena = NULL; | |
137 | ||
138 | #ifdef _KERNEL | |
139 | data_alloc_arena = zio_alloc_arena; | |
140 | #endif | |
49be0ccf BB |
141 | zio_cache = kmem_cache_create("zio_cache", sizeof (zio_t), 0, |
142 | zio_cons, zio_dest, NULL, NULL, NULL, KMC_KMEM); | |
d164b209 | 143 | zio_link_cache = kmem_cache_create("zio_link_cache", |
ae6ba3db | 144 | sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, KMC_KMEM); |
86dd0fd9 BB |
145 | zio_vdev_cache = kmem_cache_create("zio_vdev_cache", sizeof(vdev_io_t), |
146 | PAGESIZE, NULL, NULL, NULL, NULL, NULL, KMC_VMEM); | |
34dc7c2f BB |
147 | |
148 | /* | |
149 | * For small buffers, we want a cache for each multiple of | |
150 | * SPA_MINBLOCKSIZE. For medium-size buffers, we want a cache | |
151 | * for each quarter-power of 2. For large buffers, we want | |
152 | * a cache for each multiple of PAGESIZE. | |
153 | */ | |
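/*
 * Worked example (editor's note, not part of the original source; assumes
 * a 4K PAGESIZE): a 512-byte request falls in the "small" class and gets
 * SPA_MINBLOCKSIZE alignment; an 8K request is a PAGESIZE multiple and
 * gets PAGESIZE alignment; a 6K request matches neither, so the
 * quarter-power-of-2 rule applies: p2 rounds down to 4K and align becomes
 * p2 >> 2 = 1K. A size that satisfies none of the rules (e.g. 4.5K) gets
 * no dedicated cache and is served by the next larger cache, filled in by
 * the fallback loop further below.
 */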
154 | for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { | |
155 | size_t size = (c + 1) << SPA_MINBLOCKSHIFT; | |
156 | size_t p2 = size; | |
157 | size_t align = 0; | |
158 | ||
159 | while (p2 & (p2 - 1)) | |
160 | p2 &= p2 - 1; | |
161 | ||
162 | if (size <= 4 * SPA_MINBLOCKSIZE) { | |
163 | align = SPA_MINBLOCKSIZE; | |
164 | } else if (P2PHASE(size, PAGESIZE) == 0) { | |
165 | align = PAGESIZE; | |
166 | } else if (P2PHASE(size, p2 >> 2) == 0) { | |
167 | align = p2 >> 2; | |
168 | } | |
169 | ||
170 | if (align != 0) { | |
171 | char name[36]; | |
ae6ba3db BB |
172 | int flags = zio_bulk_flags; |
173 | ||
174 | /* | |
175 | * The smallest buffers (512b) are heavily used and | |
176 | * experience a lot of churn. The slabs allocated | |
177 | * for them are also relatively small (32K). Thus | |
178 | * in order to avoid expensive calls to vmalloc() we |
179 | * make an exception to the usual slab allocation | |
180 | * policy and force these buffers to be kmem backed. | |
181 | */ | |
182 | if (size == (1 << SPA_MINBLOCKSHIFT)) | |
183 | flags |= KMC_KMEM; | |
184 | ||
34dc7c2f BB |
185 | (void) sprintf(name, "zio_buf_%lu", (ulong_t)size); |
186 | zio_buf_cache[c] = kmem_cache_create(name, size, | |
ae6ba3db | 187 | align, NULL, NULL, NULL, NULL, NULL, flags); |
34dc7c2f BB |
188 | |
189 | (void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size); | |
190 | zio_data_buf_cache[c] = kmem_cache_create(name, size, | |
ae6ba3db BB |
191 | align, NULL, NULL, NULL, NULL, |
192 | data_alloc_arena, flags); | |
34dc7c2f BB |
193 | } |
194 | } | |
195 | ||
196 | while (--c != 0) { | |
197 | ASSERT(zio_buf_cache[c] != NULL); | |
198 | if (zio_buf_cache[c - 1] == NULL) | |
199 | zio_buf_cache[c - 1] = zio_buf_cache[c]; | |
200 | ||
201 | ASSERT(zio_data_buf_cache[c] != NULL); | |
202 | if (zio_data_buf_cache[c - 1] == NULL) | |
203 | zio_data_buf_cache[c - 1] = zio_data_buf_cache[c]; | |
204 | } | |
205 | ||
6d974228 GW |
206 | /* |
207 | * The zio write taskqs have 1 thread per cpu, allow 1/2 of the taskqs | |
208 | * to fail 3 times per txg or 8 failures, whichever is greater. | |
209 | */ | |
210 | zfs_mg_alloc_failures = MAX((3 * max_ncpus / 2), 8); | |
211 | ||
34dc7c2f | 212 | zio_inject_init(); |
9759c60f ED |
213 | |
214 | lz4_init(); | |
34dc7c2f BB |
215 | } |
216 | ||
217 | void | |
218 | zio_fini(void) | |
219 | { | |
220 | size_t c; | |
221 | kmem_cache_t *last_cache = NULL; | |
222 | kmem_cache_t *last_data_cache = NULL; | |
223 | ||
224 | for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) { | |
225 | if (zio_buf_cache[c] != last_cache) { | |
226 | last_cache = zio_buf_cache[c]; | |
227 | kmem_cache_destroy(zio_buf_cache[c]); | |
228 | } | |
229 | zio_buf_cache[c] = NULL; | |
230 | ||
231 | if (zio_data_buf_cache[c] != last_data_cache) { | |
232 | last_data_cache = zio_data_buf_cache[c]; | |
233 | kmem_cache_destroy(zio_data_buf_cache[c]); | |
234 | } | |
235 | zio_data_buf_cache[c] = NULL; | |
236 | } | |
237 | ||
86dd0fd9 | 238 | kmem_cache_destroy(zio_vdev_cache); |
d164b209 | 239 | kmem_cache_destroy(zio_link_cache); |
34dc7c2f BB |
240 | kmem_cache_destroy(zio_cache); |
241 | ||
242 | zio_inject_fini(); | |
9759c60f ED |
243 | |
244 | lz4_fini(); | |
34dc7c2f BB |
245 | } |
246 | ||
247 | /* | |
248 | * ========================================================================== | |
249 | * Allocate and free I/O buffers | |
250 | * ========================================================================== | |
251 | */ | |
252 | ||
253 | /* | |
254 | * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a | |
255 | * crashdump if the kernel panics, so use it judiciously. Obviously, it's | |
256 | * useful to inspect ZFS metadata, but if possible, we should avoid keeping | |
257 | * excess / transient data in-core during a crashdump. | |
258 | */ | |
259 | void * | |
260 | zio_buf_alloc(size_t size) | |
261 | { | |
262 | size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; | |
263 | ||
264 | ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); | |
265 | ||
ebcfc8a5 | 266 | return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE | KM_NODEBUG)); |
34dc7c2f BB |
267 | } |
268 | ||
269 | /* | |
270 | * Use zio_data_buf_alloc to allocate data. The data will not appear in a | |
271 | * crashdump if the kernel panics. This exists so that we will limit the amount | |
272 | * of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount | |
273 | * of kernel heap dumped to disk when the kernel panics) | |
274 | */ | |
275 | void * | |
276 | zio_data_buf_alloc(size_t size) | |
277 | { | |
278 | size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; | |
279 | ||
280 | ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); | |
281 | ||
ebcfc8a5 BB |
282 | return (kmem_cache_alloc(zio_data_buf_cache[c], |
283 | KM_PUSHPAGE | KM_NODEBUG)); | |
34dc7c2f BB |
284 | } |
285 | ||
286 | void | |
287 | zio_buf_free(void *buf, size_t size) | |
288 | { | |
289 | size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; | |
290 | ||
291 | ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); | |
292 | ||
293 | kmem_cache_free(zio_buf_cache[c], buf); | |
294 | } | |
295 | ||
296 | void | |
297 | zio_data_buf_free(void *buf, size_t size) | |
298 | { | |
299 | size_t c = (size - 1) >> SPA_MINBLOCKSHIFT; | |
300 | ||
301 | ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT); | |
302 | ||
303 | kmem_cache_free(zio_data_buf_cache[c], buf); | |
304 | } | |
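/*
 * Usage sketch (editor's note, not part of the original source):
 * allocations and frees must pass the same size, since the size selects
 * the backing kmem cache. Metadata buffers come from zio_buf_alloc(),
 * file data from zio_data_buf_alloc():
 *
 *	void *mbuf = zio_buf_alloc(SPA_MINBLOCKSIZE);
 *	void *dbuf = zio_data_buf_alloc(SPA_MAXBLOCKSIZE);
 *	...
 *	zio_buf_free(mbuf, SPA_MINBLOCKSIZE);
 *	zio_data_buf_free(dbuf, SPA_MAXBLOCKSIZE);
 */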
305 | ||
86dd0fd9 BB |
306 | /* |
307 | * Dedicated I/O buffers to ensure that memory fragmentation never prevents | |
308 | * or significantly delays the issuing of a zio. These buffers are used | |
309 | * to aggregate I/O and could be used for raidz stripes. | |
310 | */ | |
311 | void * | |
312 | zio_vdev_alloc(void) | |
313 | { | |
314 | return (kmem_cache_alloc(zio_vdev_cache, KM_PUSHPAGE)); | |
315 | } | |
316 | ||
317 | void | |
318 | zio_vdev_free(void *buf) | |
319 | { | |
320 | kmem_cache_free(zio_vdev_cache, buf); | |
321 | ||
322 | } | |
323 | ||
34dc7c2f BB |
324 | /* |
325 | * ========================================================================== | |
326 | * Push and pop I/O transform buffers | |
327 | * ========================================================================== | |
328 | */ | |
329 | static void | |
b128c09f BB |
330 | zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize, |
331 | zio_transform_func_t *transform) | |
34dc7c2f | 332 | { |
691f6ac4 | 333 | zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_PUSHPAGE); |
34dc7c2f | 334 | |
b128c09f BB |
335 | zt->zt_orig_data = zio->io_data; |
336 | zt->zt_orig_size = zio->io_size; | |
34dc7c2f | 337 | zt->zt_bufsize = bufsize; |
b128c09f | 338 | zt->zt_transform = transform; |
34dc7c2f BB |
339 | |
340 | zt->zt_next = zio->io_transform_stack; | |
341 | zio->io_transform_stack = zt; | |
342 | ||
343 | zio->io_data = data; | |
344 | zio->io_size = size; | |
345 | } | |
346 | ||
347 | static void | |
b128c09f | 348 | zio_pop_transforms(zio_t *zio) |
34dc7c2f | 349 | { |
b128c09f BB |
350 | zio_transform_t *zt; |
351 | ||
352 | while ((zt = zio->io_transform_stack) != NULL) { | |
353 | if (zt->zt_transform != NULL) | |
354 | zt->zt_transform(zio, | |
355 | zt->zt_orig_data, zt->zt_orig_size); | |
34dc7c2f | 356 | |
428870ff BB |
357 | if (zt->zt_bufsize != 0) |
358 | zio_buf_free(zio->io_data, zt->zt_bufsize); | |
34dc7c2f | 359 | |
b128c09f BB |
360 | zio->io_data = zt->zt_orig_data; |
361 | zio->io_size = zt->zt_orig_size; | |
362 | zio->io_transform_stack = zt->zt_next; | |
34dc7c2f | 363 | |
b128c09f | 364 | kmem_free(zt, sizeof (zio_transform_t)); |
34dc7c2f BB |
365 | } |
366 | } | |
367 | ||
b128c09f BB |
368 | /* |
369 | * ========================================================================== | |
370 | * I/O transform callbacks for subblocks and decompression | |
371 | * ========================================================================== | |
372 | */ | |
373 | static void | |
374 | zio_subblock(zio_t *zio, void *data, uint64_t size) | |
375 | { | |
376 | ASSERT(zio->io_size > size); | |
377 | ||
378 | if (zio->io_type == ZIO_TYPE_READ) | |
379 | bcopy(zio->io_data, data, size); | |
380 | } | |
381 | ||
382 | static void | |
383 | zio_decompress(zio_t *zio, void *data, uint64_t size) | |
384 | { | |
385 | if (zio->io_error == 0 && | |
386 | zio_decompress_data(BP_GET_COMPRESS(zio->io_bp), | |
428870ff | 387 | zio->io_data, data, zio->io_size, size) != 0) |
b128c09f BB |
388 | zio->io_error = EIO; |
389 | } | |
390 | ||
391 | /* | |
392 | * ========================================================================== | |
393 | * I/O parent/child relationships and pipeline interlocks | |
394 | * ========================================================================== | |
395 | */ | |
d164b209 BB |
396 | /* |
397 | * NOTE - Callers to zio_walk_parents() and zio_walk_children must | |
398 | * continue calling these functions until they return NULL. | |
399 | * Otherwise, the next caller will pick up the list walk in | |
400 | * some indeterminate state. (Otherwise every caller would | |
401 | * have to pass in a cookie to keep the state represented by | |
402 | * io_walk_link, which gets annoying.) | |
403 | */ | |
404 | zio_t * | |
405 | zio_walk_parents(zio_t *cio) | |
406 | { | |
407 | zio_link_t *zl = cio->io_walk_link; | |
408 | list_t *pl = &cio->io_parent_list; | |
b128c09f | 409 | |
d164b209 BB |
410 | zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl); |
411 | cio->io_walk_link = zl; | |
412 | ||
413 | if (zl == NULL) | |
414 | return (NULL); | |
415 | ||
416 | ASSERT(zl->zl_child == cio); | |
417 | return (zl->zl_parent); | |
418 | } | |
419 | ||
420 | zio_t * | |
421 | zio_walk_children(zio_t *pio) | |
422 | { | |
423 | zio_link_t *zl = pio->io_walk_link; | |
424 | list_t *cl = &pio->io_child_list; | |
425 | ||
426 | zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl); | |
427 | pio->io_walk_link = zl; | |
428 | ||
429 | if (zl == NULL) | |
430 | return (NULL); | |
431 | ||
432 | ASSERT(zl->zl_parent == pio); | |
433 | return (zl->zl_child); | |
434 | } | |
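/*
 * Usage sketch (editor's note): per the NOTE above, a walk must run
 * until the function returns NULL so that io_walk_link is left reset
 * for the next caller. 'visit' is a hypothetical callback:
 *
 *	zio_t *cio;
 *	for (cio = zio_walk_children(pio); cio != NULL;
 *	    cio = zio_walk_children(pio))
 *		visit(cio);
 */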
435 | ||
436 | zio_t * | |
437 | zio_unique_parent(zio_t *cio) | |
438 | { | |
439 | zio_t *pio = zio_walk_parents(cio); | |
440 | ||
441 | VERIFY(zio_walk_parents(cio) == NULL); | |
442 | return (pio); | |
443 | } | |
444 | ||
445 | void | |
446 | zio_add_child(zio_t *pio, zio_t *cio) | |
b128c09f | 447 | { |
691f6ac4 | 448 | zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_PUSHPAGE); |
d6320ddb | 449 | int w; |
d164b209 BB |
450 | |
451 | /* | |
452 | * Logical I/Os can have logical, gang, or vdev children. | |
453 | * Gang I/Os can have gang or vdev children. | |
454 | * Vdev I/Os can only have vdev children. | |
455 | * The following ASSERT captures all of these constraints. | |
456 | */ | |
457 | ASSERT(cio->io_child_type <= pio->io_child_type); | |
458 | ||
459 | zl->zl_parent = pio; | |
460 | zl->zl_child = cio; | |
461 | ||
462 | mutex_enter(&cio->io_lock); | |
b128c09f | 463 | mutex_enter(&pio->io_lock); |
d164b209 BB |
464 | |
465 | ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0); | |
466 | ||
d6320ddb | 467 | for (w = 0; w < ZIO_WAIT_TYPES; w++) |
d164b209 BB |
468 | pio->io_children[cio->io_child_type][w] += !cio->io_state[w]; |
469 | ||
470 | list_insert_head(&pio->io_child_list, zl); | |
471 | list_insert_head(&cio->io_parent_list, zl); | |
472 | ||
428870ff BB |
473 | pio->io_child_count++; |
474 | cio->io_parent_count++; | |
475 | ||
b128c09f | 476 | mutex_exit(&pio->io_lock); |
d164b209 | 477 | mutex_exit(&cio->io_lock); |
b128c09f BB |
478 | } |
479 | ||
34dc7c2f | 480 | static void |
d164b209 | 481 | zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl) |
b128c09f | 482 | { |
d164b209 BB |
483 | ASSERT(zl->zl_parent == pio); |
484 | ASSERT(zl->zl_child == cio); | |
b128c09f | 485 | |
d164b209 | 486 | mutex_enter(&cio->io_lock); |
b128c09f | 487 | mutex_enter(&pio->io_lock); |
d164b209 BB |
488 | |
489 | list_remove(&pio->io_child_list, zl); | |
490 | list_remove(&cio->io_parent_list, zl); | |
491 | ||
428870ff BB |
492 | pio->io_child_count--; |
493 | cio->io_parent_count--; | |
494 | ||
b128c09f | 495 | mutex_exit(&pio->io_lock); |
d164b209 BB |
496 | mutex_exit(&cio->io_lock); |
497 | ||
498 | kmem_cache_free(zio_link_cache, zl); | |
b128c09f BB |
499 | } |
500 | ||
501 | static boolean_t | |
502 | zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait) | |
34dc7c2f | 503 | { |
b128c09f BB |
504 | uint64_t *countp = &zio->io_children[child][wait]; |
505 | boolean_t waiting = B_FALSE; | |
506 | ||
507 | mutex_enter(&zio->io_lock); | |
508 | ASSERT(zio->io_stall == NULL); | |
509 | if (*countp != 0) { | |
428870ff | 510 | zio->io_stage >>= 1; |
b128c09f BB |
511 | zio->io_stall = countp; |
512 | waiting = B_TRUE; | |
513 | } | |
514 | mutex_exit(&zio->io_lock); | |
515 | ||
516 | return (waiting); | |
517 | } | |
34dc7c2f | 518 | |
bf701a83 BB |
519 | __attribute__((always_inline)) |
520 | static inline void | |
b128c09f BB |
521 | zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait) |
522 | { | |
523 | uint64_t *countp = &pio->io_children[zio->io_child_type][wait]; | |
524 | int *errorp = &pio->io_child_error[zio->io_child_type]; | |
34dc7c2f | 525 | |
b128c09f BB |
526 | mutex_enter(&pio->io_lock); |
527 | if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE)) | |
528 | *errorp = zio_worst_error(*errorp, zio->io_error); | |
529 | pio->io_reexecute |= zio->io_reexecute; | |
530 | ASSERT3U(*countp, >, 0); | |
531 | if (--*countp == 0 && pio->io_stall == countp) { | |
532 | pio->io_stall = NULL; | |
533 | mutex_exit(&pio->io_lock); | |
da6b4005 | 534 | __zio_execute(pio); |
b128c09f BB |
535 | } else { |
536 | mutex_exit(&pio->io_lock); | |
34dc7c2f BB |
537 | } |
538 | } | |
539 | ||
b128c09f BB |
540 | static void |
541 | zio_inherit_child_errors(zio_t *zio, enum zio_child c) | |
542 | { | |
543 | if (zio->io_child_error[c] != 0 && zio->io_error == 0) | |
544 | zio->io_error = zio->io_child_error[c]; | |
545 | } | |
546 | ||
34dc7c2f BB |
547 | /* |
548 | * ========================================================================== | |
b128c09f | 549 | * Create the various types of I/O (read, write, free, etc) |
34dc7c2f BB |
550 | * ========================================================================== |
551 | */ | |
552 | static zio_t * | |
428870ff | 553 | zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, |
34dc7c2f | 554 | void *data, uint64_t size, zio_done_func_t *done, void *private, |
428870ff BB |
555 | zio_type_t type, int priority, enum zio_flag flags, |
556 | vdev_t *vd, uint64_t offset, const zbookmark_t *zb, | |
557 | enum zio_stage stage, enum zio_stage pipeline) | |
34dc7c2f BB |
558 | { |
559 | zio_t *zio; | |
560 | ||
561 | ASSERT3U(size, <=, SPA_MAXBLOCKSIZE); | |
562 | ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0); | |
b128c09f BB |
563 | ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0); |
564 | ||
565 | ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER)); | |
566 | ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER)); | |
567 | ASSERT(vd || stage == ZIO_STAGE_OPEN); | |
34dc7c2f | 568 | |
691f6ac4 | 569 | zio = kmem_cache_alloc(zio_cache, KM_PUSHPAGE); |
d164b209 | 570 | |
b128c09f BB |
571 | if (vd != NULL) |
572 | zio->io_child_type = ZIO_CHILD_VDEV; | |
573 | else if (flags & ZIO_FLAG_GANG_CHILD) | |
574 | zio->io_child_type = ZIO_CHILD_GANG; | |
428870ff BB |
575 | else if (flags & ZIO_FLAG_DDT_CHILD) |
576 | zio->io_child_type = ZIO_CHILD_DDT; | |
b128c09f BB |
577 | else |
578 | zio->io_child_type = ZIO_CHILD_LOGICAL; | |
579 | ||
34dc7c2f | 580 | if (bp != NULL) { |
49be0ccf | 581 | zio->io_logical = NULL; |
428870ff | 582 | zio->io_bp = (blkptr_t *)bp; |
34dc7c2f BB |
583 | zio->io_bp_copy = *bp; |
584 | zio->io_bp_orig = *bp; | |
428870ff BB |
585 | if (type != ZIO_TYPE_WRITE || |
586 | zio->io_child_type == ZIO_CHILD_DDT) | |
b128c09f | 587 | zio->io_bp = &zio->io_bp_copy; /* so caller can free */ |
9babb374 | 588 | if (zio->io_child_type == ZIO_CHILD_LOGICAL) |
b128c09f | 589 | zio->io_logical = zio; |
9babb374 BB |
590 | if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp)) |
591 | pipeline |= ZIO_GANG_STAGES; | |
49be0ccf BB |
592 | } else { |
593 | zio->io_logical = NULL; | |
594 | zio->io_bp = NULL; | |
595 | bzero(&zio->io_bp_copy, sizeof (blkptr_t)); | |
596 | bzero(&zio->io_bp_orig, sizeof (blkptr_t)); | |
34dc7c2f | 597 | } |
b128c09f BB |
598 | |
599 | zio->io_spa = spa; | |
600 | zio->io_txg = txg; | |
49be0ccf | 601 | zio->io_ready = NULL; |
34dc7c2f BB |
602 | zio->io_done = done; |
603 | zio->io_private = private; | |
49be0ccf | 604 | zio->io_prev_space_delta = 0; |
34dc7c2f BB |
605 | zio->io_type = type; |
606 | zio->io_priority = priority; | |
b128c09f | 607 | zio->io_vd = vd; |
49be0ccf BB |
608 | zio->io_vsd = NULL; |
609 | zio->io_vsd_ops = NULL; | |
b128c09f | 610 | zio->io_offset = offset; |
49be0ccf | 611 | zio->io_deadline = 0; |
cc92e9d0 GW |
612 | zio->io_timestamp = 0; |
613 | zio->io_delta = 0; | |
614 | zio->io_delay = 0; | |
428870ff BB |
615 | zio->io_orig_data = zio->io_data = data; |
616 | zio->io_orig_size = zio->io_size = size; | |
b128c09f BB |
617 | zio->io_orig_flags = zio->io_flags = flags; |
618 | zio->io_orig_stage = zio->io_stage = stage; | |
619 | zio->io_orig_pipeline = zio->io_pipeline = pipeline; | |
49be0ccf BB |
620 | bzero(&zio->io_prop, sizeof (zio_prop_t)); |
621 | zio->io_cmd = 0; | |
622 | zio->io_reexecute = 0; | |
623 | zio->io_bp_override = NULL; | |
624 | zio->io_walk_link = NULL; | |
625 | zio->io_transform_stack = NULL; | |
49be0ccf BB |
626 | zio->io_error = 0; |
627 | zio->io_child_count = 0; | |
628 | zio->io_parent_count = 0; | |
629 | zio->io_stall = NULL; | |
630 | zio->io_gang_leader = NULL; | |
631 | zio->io_gang_tree = NULL; | |
632 | zio->io_executor = NULL; | |
633 | zio->io_waiter = NULL; | |
634 | zio->io_cksum_report = NULL; | |
635 | zio->io_ena = 0; | |
636 | bzero(zio->io_child_error, sizeof (int) * ZIO_CHILD_TYPES); | |
637 | bzero(zio->io_children, | |
638 | sizeof (uint64_t) * ZIO_CHILD_TYPES * ZIO_WAIT_TYPES); | |
639 | bzero(&zio->io_bookmark, sizeof (zbookmark_t)); | |
34dc7c2f | 640 | |
d164b209 BB |
641 | zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY); |
642 | zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE); | |
643 | ||
b128c09f BB |
644 | if (zb != NULL) |
645 | zio->io_bookmark = *zb; | |
646 | ||
647 | if (pio != NULL) { | |
b128c09f | 648 | if (zio->io_logical == NULL) |
34dc7c2f | 649 | zio->io_logical = pio->io_logical; |
9babb374 BB |
650 | if (zio->io_child_type == ZIO_CHILD_GANG) |
651 | zio->io_gang_leader = pio->io_gang_leader; | |
b128c09f | 652 | zio_add_child(pio, zio); |
34dc7c2f BB |
653 | } |
654 | ||
a38718a6 GA |
655 | taskq_init_ent(&zio->io_tqent); |
656 | ||
34dc7c2f BB |
657 | return (zio); |
658 | } | |
659 | ||
660 | static void | |
b128c09f | 661 | zio_destroy(zio_t *zio) |
34dc7c2f | 662 | { |
b128c09f | 663 | kmem_cache_free(zio_cache, zio); |
34dc7c2f BB |
664 | } |
665 | ||
666 | zio_t * | |
d164b209 | 667 | zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done, |
428870ff | 668 | void *private, enum zio_flag flags) |
34dc7c2f BB |
669 | { |
670 | zio_t *zio; | |
671 | ||
672 | zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private, | |
d164b209 | 673 | ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL, |
b128c09f | 674 | ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE); |
34dc7c2f BB |
675 | |
676 | return (zio); | |
677 | } | |
678 | ||
679 | zio_t * | |
428870ff | 680 | zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags) |
34dc7c2f | 681 | { |
d164b209 | 682 | return (zio_null(NULL, spa, NULL, done, private, flags)); |
34dc7c2f BB |
683 | } |
684 | ||
685 | zio_t * | |
b128c09f BB |
686 | zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, |
687 | void *data, uint64_t size, zio_done_func_t *done, void *private, | |
428870ff | 688 | int priority, enum zio_flag flags, const zbookmark_t *zb) |
34dc7c2f BB |
689 | { |
690 | zio_t *zio; | |
691 | ||
428870ff | 692 | zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp, |
b128c09f BB |
693 | data, size, done, private, |
694 | ZIO_TYPE_READ, priority, flags, NULL, 0, zb, | |
428870ff BB |
695 | ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ? |
696 | ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE); | |
34dc7c2f | 697 | |
b128c09f BB |
698 | return (zio); |
699 | } | |
34dc7c2f | 700 | |
34dc7c2f | 701 | zio_t * |
b128c09f | 702 | zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, |
428870ff | 703 | void *data, uint64_t size, const zio_prop_t *zp, |
b128c09f | 704 | zio_done_func_t *ready, zio_done_func_t *done, void *private, |
428870ff | 705 | int priority, enum zio_flag flags, const zbookmark_t *zb) |
34dc7c2f BB |
706 | { |
707 | zio_t *zio; | |
708 | ||
b128c09f BB |
709 | ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF && |
710 | zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS && | |
711 | zp->zp_compress >= ZIO_COMPRESS_OFF && | |
712 | zp->zp_compress < ZIO_COMPRESS_FUNCTIONS && | |
9ae529ec | 713 | DMU_OT_IS_VALID(zp->zp_type) && |
b128c09f | 714 | zp->zp_level < 32 && |
428870ff BB |
715 | zp->zp_copies > 0 && |
716 | zp->zp_copies <= spa_max_replication(spa) && | |
717 | zp->zp_dedup <= 1 && | |
718 | zp->zp_dedup_verify <= 1); | |
34dc7c2f BB |
719 | |
720 | zio = zio_create(pio, spa, txg, bp, data, size, done, private, | |
b128c09f | 721 | ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb, |
428870ff BB |
722 | ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ? |
723 | ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE); | |
34dc7c2f BB |
724 | |
725 | zio->io_ready = ready; | |
b128c09f | 726 | zio->io_prop = *zp; |
34dc7c2f BB |
727 | |
728 | return (zio); | |
729 | } | |
730 | ||
731 | zio_t * | |
b128c09f BB |
732 | zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data, |
733 | uint64_t size, zio_done_func_t *done, void *private, int priority, | |
428870ff | 734 | enum zio_flag flags, zbookmark_t *zb) |
34dc7c2f BB |
735 | { |
736 | zio_t *zio; | |
737 | ||
34dc7c2f | 738 | zio = zio_create(pio, spa, txg, bp, data, size, done, private, |
b128c09f BB |
739 | ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb, |
740 | ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE); | |
34dc7c2f BB |
741 | |
742 | return (zio); | |
743 | } | |
744 | ||
428870ff BB |
745 | void |
746 | zio_write_override(zio_t *zio, blkptr_t *bp, int copies) | |
747 | { | |
748 | ASSERT(zio->io_type == ZIO_TYPE_WRITE); | |
749 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
750 | ASSERT(zio->io_stage == ZIO_STAGE_OPEN); | |
751 | ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa)); | |
752 | ||
753 | zio->io_prop.zp_copies = copies; | |
754 | zio->io_bp_override = bp; | |
755 | } | |
756 | ||
757 | void | |
758 | zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp) | |
759 | { | |
760 | bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp); | |
761 | } | |
762 | ||
34dc7c2f | 763 | zio_t * |
428870ff BB |
764 | zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, |
765 | enum zio_flag flags) | |
34dc7c2f BB |
766 | { |
767 | zio_t *zio; | |
768 | ||
428870ff BB |
769 | dprintf_bp(bp, "freeing in txg %llu, pass %u", |
770 | (longlong_t)txg, spa->spa_sync_pass); | |
34dc7c2f | 771 | |
428870ff BB |
772 | ASSERT(!BP_IS_HOLE(bp)); |
773 | ASSERT(spa_syncing_txg(spa) == txg); | |
774 | ASSERT(spa_sync_pass(spa) <= SYNC_PASS_DEFERRED_FREE); | |
34dc7c2f | 775 | |
b128c09f | 776 | zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp), |
428870ff | 777 | NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, flags, |
b128c09f | 778 | NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_FREE_PIPELINE); |
34dc7c2f BB |
779 | |
780 | return (zio); | |
781 | } | |
782 | ||
783 | zio_t * | |
428870ff BB |
784 | zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, |
785 | zio_done_func_t *done, void *private, enum zio_flag flags) | |
34dc7c2f BB |
786 | { |
787 | zio_t *zio; | |
788 | ||
789 | /* | |
790 | * A claim is an allocation of a specific block. Claims are needed | |
791 | * to support immediate writes in the intent log. The issue is that | |
792 | * immediate writes contain committed data, but in a txg that was | |
793 | * *not* committed. Upon opening the pool after an unclean shutdown, | |
794 | * the intent log claims all blocks that contain immediate write data | |
795 | * so that the SPA knows they're in use. | |
796 | * | |
797 | * All claims *must* be resolved in the first txg -- before the SPA | |
798 | * starts allocating blocks -- so that nothing is allocated twice. | |
428870ff | 799 | * If txg == 0 we just verify that the block is claimable. |
34dc7c2f BB |
800 | */ |
801 | ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa)); | |
428870ff BB |
802 | ASSERT(txg == spa_first_txg(spa) || txg == 0); |
803 | ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(1M) */ | |
34dc7c2f | 804 | |
b128c09f BB |
805 | zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp), |
806 | done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags, | |
807 | NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE); | |
34dc7c2f BB |
808 | |
809 | return (zio); | |
810 | } | |
811 | ||
812 | zio_t * | |
813 | zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd, | |
428870ff | 814 | zio_done_func_t *done, void *private, int priority, enum zio_flag flags) |
34dc7c2f BB |
815 | { |
816 | zio_t *zio; | |
817 | int c; | |
818 | ||
819 | if (vd->vdev_children == 0) { | |
820 | zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private, | |
b128c09f | 821 | ZIO_TYPE_IOCTL, priority, flags, vd, 0, NULL, |
34dc7c2f BB |
822 | ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE); |
823 | ||
34dc7c2f BB |
824 | zio->io_cmd = cmd; |
825 | } else { | |
d164b209 | 826 | zio = zio_null(pio, spa, NULL, NULL, NULL, flags); |
34dc7c2f BB |
827 | |
828 | for (c = 0; c < vd->vdev_children; c++) | |
829 | zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd, | |
830 | done, private, priority, flags)); | |
831 | } | |
832 | ||
833 | return (zio); | |
834 | } | |
835 | ||
34dc7c2f BB |
836 | zio_t * |
837 | zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, | |
838 | void *data, int checksum, zio_done_func_t *done, void *private, | |
428870ff | 839 | int priority, enum zio_flag flags, boolean_t labels) |
34dc7c2f BB |
840 | { |
841 | zio_t *zio; | |
34dc7c2f | 842 | |
b128c09f BB |
843 | ASSERT(vd->vdev_children == 0); |
844 | ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || | |
845 | offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); | |
846 | ASSERT3U(offset + size, <=, vd->vdev_psize); | |
34dc7c2f | 847 | |
b128c09f BB |
848 | zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private, |
849 | ZIO_TYPE_READ, priority, flags, vd, offset, NULL, | |
34dc7c2f BB |
850 | ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE); |
851 | ||
b128c09f | 852 | zio->io_prop.zp_checksum = checksum; |
34dc7c2f BB |
853 | |
854 | return (zio); | |
855 | } | |
856 | ||
857 | zio_t * | |
858 | zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size, | |
859 | void *data, int checksum, zio_done_func_t *done, void *private, | |
428870ff | 860 | int priority, enum zio_flag flags, boolean_t labels) |
34dc7c2f | 861 | { |
34dc7c2f | 862 | zio_t *zio; |
34dc7c2f | 863 | |
b128c09f BB |
864 | ASSERT(vd->vdev_children == 0); |
865 | ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE || | |
866 | offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE); | |
867 | ASSERT3U(offset + size, <=, vd->vdev_psize); | |
34dc7c2f | 868 | |
b128c09f BB |
869 | zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done, private, |
870 | ZIO_TYPE_WRITE, priority, flags, vd, offset, NULL, | |
34dc7c2f BB |
871 | ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE); |
872 | ||
b128c09f | 873 | zio->io_prop.zp_checksum = checksum; |
34dc7c2f | 874 | |
428870ff | 875 | if (zio_checksum_table[checksum].ci_eck) { |
34dc7c2f | 876 | /* |
428870ff | 877 | * zec checksums are necessarily destructive -- they modify |
b128c09f | 878 | * the end of the write buffer to hold the verifier/checksum. |
34dc7c2f | 879 | * Therefore, we must make a local copy in case the data is |
b128c09f | 880 | * being written to multiple places in parallel. |
34dc7c2f | 881 | */ |
b128c09f | 882 | void *wbuf = zio_buf_alloc(size); |
34dc7c2f | 883 | bcopy(data, wbuf, size); |
b128c09f | 884 | zio_push_transform(zio, wbuf, size, size, NULL); |
34dc7c2f BB |
885 | } |
886 | ||
887 | return (zio); | |
888 | } | |
889 | ||
890 | /* | |
b128c09f | 891 | * Create a child I/O to do some work for us. |
34dc7c2f BB |
892 | */ |
893 | zio_t * | |
b128c09f | 894 | zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset, |
428870ff | 895 | void *data, uint64_t size, int type, int priority, enum zio_flag flags, |
34dc7c2f BB |
896 | zio_done_func_t *done, void *private) |
897 | { | |
428870ff | 898 | enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE; |
b128c09f BB |
899 | zio_t *zio; |
900 | ||
901 | ASSERT(vd->vdev_parent == | |
902 | (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev)); | |
34dc7c2f BB |
903 | |
904 | if (type == ZIO_TYPE_READ && bp != NULL) { | |
905 | /* | |
906 | * If we have the bp, then the child should perform the | |
907 | * checksum and the parent need not. This pushes error | |
908 | * detection as close to the leaves as possible and | |
909 | * eliminates redundant checksums in the interior nodes. | |
910 | */ | |
428870ff BB |
911 | pipeline |= ZIO_STAGE_CHECKSUM_VERIFY; |
912 | pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; | |
34dc7c2f BB |
913 | } |
914 | ||
b128c09f BB |
915 | if (vd->vdev_children == 0) |
916 | offset += VDEV_LABEL_START_SIZE; | |
917 | ||
428870ff BB |
918 | flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE; |
919 | ||
920 | /* | |
921 | * If we've decided to do a repair, the write is not speculative -- | |
922 | * even if the original read was. | |
923 | */ | |
924 | if (flags & ZIO_FLAG_IO_REPAIR) | |
925 | flags &= ~ZIO_FLAG_SPECULATIVE; | |
926 | ||
b128c09f | 927 | zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, |
428870ff BB |
928 | done, private, type, priority, flags, vd, offset, &pio->io_bookmark, |
929 | ZIO_STAGE_VDEV_IO_START >> 1, pipeline); | |
34dc7c2f | 930 | |
b128c09f | 931 | return (zio); |
34dc7c2f BB |
932 | } |
933 | ||
b128c09f BB |
934 | zio_t * |
935 | zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size, | |
428870ff BB |
936 | int type, int priority, enum zio_flag flags, |
937 | zio_done_func_t *done, void *private) | |
34dc7c2f | 938 | { |
b128c09f | 939 | zio_t *zio; |
34dc7c2f | 940 | |
b128c09f | 941 | ASSERT(vd->vdev_ops->vdev_op_leaf); |
34dc7c2f | 942 | |
b128c09f BB |
943 | zio = zio_create(NULL, vd->vdev_spa, 0, NULL, |
944 | data, size, done, private, type, priority, | |
945 | flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY, | |
946 | vd, offset, NULL, | |
428870ff | 947 | ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE); |
34dc7c2f | 948 | |
b128c09f | 949 | return (zio); |
34dc7c2f BB |
950 | } |
951 | ||
952 | void | |
b128c09f | 953 | zio_flush(zio_t *zio, vdev_t *vd) |
34dc7c2f | 954 | { |
b128c09f BB |
955 | zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE, |
956 | NULL, NULL, ZIO_PRIORITY_NOW, | |
957 | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY)); | |
34dc7c2f BB |
958 | } |
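/*
 * Usage sketch (editor's note): zio_flush() issues an asynchronous
 * child of the zio passed in, so a caller typically hangs flushes off
 * a root zio and waits on that ('spa' and 'vd' supplied by the caller):
 *
 *	zio_t *root = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_flush(root, vd);
 *	(void) zio_wait(root);
 */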
959 | ||
428870ff BB |
960 | void |
961 | zio_shrink(zio_t *zio, uint64_t size) | |
962 | { | |
963 | ASSERT(zio->io_executor == NULL); | |
964 | ASSERT(zio->io_orig_size == zio->io_size); | |
965 | ASSERT(size <= zio->io_size); | |
966 | ||
967 | /* | |
968 | * We don't shrink for raidz because of problems with the | |
969 | * reconstruction when reading back less than the block size. | |
970 | * Note, BP_IS_RAIDZ() assumes no compression. | |
971 | */ | |
972 | ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF); | |
973 | if (!BP_IS_RAIDZ(zio->io_bp)) | |
974 | zio->io_orig_size = zio->io_size = size; | |
975 | } | |
976 | ||
34dc7c2f BB |
977 | /* |
978 | * ========================================================================== | |
b128c09f | 979 | * Prepare to read and write logical blocks |
34dc7c2f BB |
980 | * ========================================================================== |
981 | */ | |
b128c09f | 982 | |
34dc7c2f | 983 | static int |
b128c09f | 984 | zio_read_bp_init(zio_t *zio) |
34dc7c2f | 985 | { |
b128c09f | 986 | blkptr_t *bp = zio->io_bp; |
34dc7c2f | 987 | |
fb5f0bc8 | 988 | if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF && |
9babb374 BB |
989 | zio->io_child_type == ZIO_CHILD_LOGICAL && |
990 | !(zio->io_flags & ZIO_FLAG_RAW)) { | |
428870ff BB |
991 | uint64_t psize = BP_GET_PSIZE(bp); |
992 | void *cbuf = zio_buf_alloc(psize); | |
b128c09f | 993 | |
428870ff | 994 | zio_push_transform(zio, cbuf, psize, psize, zio_decompress); |
34dc7c2f | 995 | } |
34dc7c2f | 996 | |
9ae529ec | 997 | if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0) |
b128c09f BB |
998 | zio->io_flags |= ZIO_FLAG_DONT_CACHE; |
999 | ||
428870ff BB |
1000 | if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP) |
1001 | zio->io_flags |= ZIO_FLAG_DONT_CACHE; | |
1002 | ||
1003 | if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL) | |
1004 | zio->io_pipeline = ZIO_DDT_READ_PIPELINE; | |
1005 | ||
b128c09f | 1006 | return (ZIO_PIPELINE_CONTINUE); |
34dc7c2f BB |
1007 | } |
1008 | ||
b128c09f BB |
1009 | static int |
1010 | zio_write_bp_init(zio_t *zio) | |
34dc7c2f | 1011 | { |
428870ff | 1012 | spa_t *spa = zio->io_spa; |
b128c09f | 1013 | zio_prop_t *zp = &zio->io_prop; |
428870ff | 1014 | enum zio_compress compress = zp->zp_compress; |
34dc7c2f | 1015 | blkptr_t *bp = zio->io_bp; |
b128c09f | 1016 | uint64_t lsize = zio->io_size; |
428870ff | 1017 | uint64_t psize = lsize; |
b128c09f | 1018 | int pass = 1; |
34dc7c2f | 1019 | |
b128c09f BB |
1020 | /* |
1021 | * If our children haven't all reached the ready stage, | |
1022 | * wait for them and then repeat this pipeline stage. | |
1023 | */ | |
1024 | if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || | |
1025 | zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_READY)) | |
1026 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f | 1027 | |
b128c09f BB |
1028 | if (!IO_IS_ALLOCATING(zio)) |
1029 | return (ZIO_PIPELINE_CONTINUE); | |
34dc7c2f | 1030 | |
428870ff BB |
1031 | ASSERT(zio->io_child_type != ZIO_CHILD_DDT); |
1032 | ||
1033 | if (zio->io_bp_override) { | |
1034 | ASSERT(bp->blk_birth != zio->io_txg); | |
1035 | ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0); | |
1036 | ||
1037 | *bp = *zio->io_bp_override; | |
1038 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; | |
1039 | ||
1040 | if (BP_IS_HOLE(bp) || !zp->zp_dedup) | |
1041 | return (ZIO_PIPELINE_CONTINUE); | |
1042 | ||
1043 | ASSERT(zio_checksum_table[zp->zp_checksum].ci_dedup || | |
1044 | zp->zp_dedup_verify); | |
1045 | ||
1046 | if (BP_GET_CHECKSUM(bp) == zp->zp_checksum) { | |
1047 | BP_SET_DEDUP(bp, 1); | |
1048 | zio->io_pipeline |= ZIO_STAGE_DDT_WRITE; | |
1049 | return (ZIO_PIPELINE_CONTINUE); | |
1050 | } | |
1051 | zio->io_bp_override = NULL; | |
1052 | BP_ZERO(bp); | |
1053 | } | |
34dc7c2f | 1054 | |
b128c09f BB |
1055 | if (bp->blk_birth == zio->io_txg) { |
1056 | /* | |
1057 | * We're rewriting an existing block, which means we're | |
1058 | * working on behalf of spa_sync(). For spa_sync() to | |
1059 | * converge, it must eventually be the case that we don't | |
1060 | * have to allocate new blocks. But compression changes | |
1061 | * the blocksize, which forces a reallocate, and makes | |
1062 | * convergence take longer. Therefore, after the first | |
1063 | * few passes, stop compressing to ensure convergence. | |
1064 | */ | |
428870ff BB |
1065 | pass = spa_sync_pass(spa); |
1066 | ||
1067 | ASSERT(zio->io_txg == spa_syncing_txg(spa)); | |
1068 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
1069 | ASSERT(!BP_GET_DEDUP(bp)); | |
34dc7c2f | 1070 | |
b128c09f BB |
1071 | if (pass > SYNC_PASS_DONT_COMPRESS) |
1072 | compress = ZIO_COMPRESS_OFF; | |
34dc7c2f | 1073 | |
b128c09f | 1074 | /* Make sure someone doesn't change their mind on overwrites */ |
428870ff BB |
1075 | ASSERT(MIN(zp->zp_copies + BP_IS_GANG(bp), |
1076 | spa_max_replication(spa)) == BP_GET_NDVAS(bp)); | |
b128c09f | 1077 | } |
34dc7c2f | 1078 | |
b128c09f | 1079 | if (compress != ZIO_COMPRESS_OFF) { |
428870ff BB |
1080 | void *cbuf = zio_buf_alloc(lsize); |
1081 | psize = zio_compress_data(compress, zio->io_data, cbuf, lsize); | |
1082 | if (psize == 0 || psize == lsize) { | |
b128c09f | 1083 | compress = ZIO_COMPRESS_OFF; |
428870ff BB |
1084 | zio_buf_free(cbuf, lsize); |
1085 | } else { | |
1086 | ASSERT(psize < lsize); | |
1087 | zio_push_transform(zio, cbuf, psize, lsize, NULL); | |
b128c09f BB |
1088 | } |
1089 | } | |
34dc7c2f | 1090 | |
b128c09f BB |
1091 | /* |
1092 | * The final pass of spa_sync() must be all rewrites, but the first | |
1093 | * few passes offer a trade-off: allocating blocks defers convergence, | |
1094 | * but newly allocated blocks are sequential, so they can be written | |
1095 | * to disk faster. Therefore, we allow the first few passes of | |
1096 | * spa_sync() to allocate new blocks, but force rewrites after that. | |
1097 | * There should only be a handful of blocks after pass 1 in any case. | |
1098 | */ | |
428870ff | 1099 | if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == psize && |
b128c09f | 1100 | pass > SYNC_PASS_REWRITE) { |
428870ff | 1101 | enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; |
d6320ddb | 1102 | ASSERT(psize != 0); |
b128c09f BB |
1103 | zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; |
1104 | zio->io_flags |= ZIO_FLAG_IO_REWRITE; | |
1105 | } else { | |
1106 | BP_ZERO(bp); | |
1107 | zio->io_pipeline = ZIO_WRITE_PIPELINE; | |
1108 | } | |
34dc7c2f | 1109 | |
428870ff | 1110 | if (psize == 0) { |
b128c09f BB |
1111 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; |
1112 | } else { | |
1113 | ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER); | |
1114 | BP_SET_LSIZE(bp, lsize); | |
428870ff | 1115 | BP_SET_PSIZE(bp, psize); |
b128c09f BB |
1116 | BP_SET_COMPRESS(bp, compress); |
1117 | BP_SET_CHECKSUM(bp, zp->zp_checksum); | |
1118 | BP_SET_TYPE(bp, zp->zp_type); | |
1119 | BP_SET_LEVEL(bp, zp->zp_level); | |
428870ff | 1120 | BP_SET_DEDUP(bp, zp->zp_dedup); |
b128c09f | 1121 | BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); |
428870ff BB |
1122 | if (zp->zp_dedup) { |
1123 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
1124 | ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE)); | |
1125 | zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE; | |
1126 | } | |
1127 | } | |
1128 | ||
1129 | return (ZIO_PIPELINE_CONTINUE); | |
1130 | } | |
1131 | ||
1132 | static int | |
1133 | zio_free_bp_init(zio_t *zio) | |
1134 | { | |
1135 | blkptr_t *bp = zio->io_bp; | |
1136 | ||
1137 | if (zio->io_child_type == ZIO_CHILD_LOGICAL) { | |
1138 | if (BP_GET_DEDUP(bp)) | |
1139 | zio->io_pipeline = ZIO_DDT_FREE_PIPELINE; | |
b128c09f | 1140 | } |
34dc7c2f BB |
1141 | |
1142 | return (ZIO_PIPELINE_CONTINUE); | |
1143 | } | |
1144 | ||
b128c09f BB |
1145 | /* |
1146 | * ========================================================================== | |
1147 | * Execute the I/O pipeline | |
1148 | * ========================================================================== | |
1149 | */ | |
1150 | ||
1151 | static void | |
428870ff | 1152 | zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q, boolean_t cutinline) |
34dc7c2f | 1153 | { |
428870ff | 1154 | spa_t *spa = zio->io_spa; |
b128c09f | 1155 | zio_type_t t = zio->io_type; |
a38718a6 | 1156 | int flags = (cutinline ? TQ_FRONT : 0); |
34dc7c2f BB |
1157 | |
1158 | /* | |
9babb374 BB |
1159 | * If we're a config writer or a probe, the normal issue and |
1160 | * interrupt threads may all be blocked waiting for the config lock. | |
1161 | * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL. | |
34dc7c2f | 1162 | */ |
9babb374 | 1163 | if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE)) |
b128c09f | 1164 | t = ZIO_TYPE_NULL; |
34dc7c2f BB |
1165 | |
1166 | /* | |
b128c09f | 1167 | * A similar issue exists for the L2ARC write thread until L2ARC 2.0. |
34dc7c2f | 1168 | */ |
b128c09f BB |
1169 | if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux) |
1170 | t = ZIO_TYPE_NULL; | |
34dc7c2f | 1171 | |
428870ff BB |
1172 | /* |
1173 | * If this is a high priority I/O, then use the high priority taskq. | |
1174 | */ | |
1175 | if (zio->io_priority == ZIO_PRIORITY_NOW && | |
1176 | spa->spa_zio_taskq[t][q + 1] != NULL) | |
1177 | q++; | |
1178 | ||
1179 | ASSERT3U(q, <, ZIO_TASKQ_TYPES); | |
5cc556b4 | 1180 | |
a38718a6 GA |
1181 | /* |
1182 | * NB: We are assuming that the zio can only be dispatched | |
1183 | * to a single taskq at a time. It would be a grievous error | |
1184 | * to dispatch the zio to another taskq at the same time. | |
1185 | */ | |
1186 | ASSERT(taskq_empty_ent(&zio->io_tqent)); | |
1187 | taskq_dispatch_ent(spa->spa_zio_taskq[t][q], | |
1188 | (task_func_t *)zio_execute, zio, flags, &zio->io_tqent); | |
b128c09f | 1189 | } |
34dc7c2f | 1190 | |
b128c09f BB |
1191 | static boolean_t |
1192 | zio_taskq_member(zio_t *zio, enum zio_taskq_type q) | |
1193 | { | |
1194 | kthread_t *executor = zio->io_executor; | |
1195 | spa_t *spa = zio->io_spa; | |
d6320ddb | 1196 | zio_type_t t; |
34dc7c2f | 1197 | |
d6320ddb | 1198 | for (t = 0; t < ZIO_TYPES; t++) |
b128c09f BB |
1199 | if (taskq_member(spa->spa_zio_taskq[t][q], executor)) |
1200 | return (B_TRUE); | |
34dc7c2f | 1201 | |
b128c09f BB |
1202 | return (B_FALSE); |
1203 | } | |
34dc7c2f | 1204 | |
b128c09f BB |
1205 | static int |
1206 | zio_issue_async(zio_t *zio) | |
1207 | { | |
428870ff | 1208 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); |
b128c09f BB |
1209 | |
1210 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f BB |
1211 | } |
1212 | ||
b128c09f BB |
1213 | void |
1214 | zio_interrupt(zio_t *zio) | |
34dc7c2f | 1215 | { |
428870ff | 1216 | zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE); |
b128c09f | 1217 | } |
34dc7c2f | 1218 | |
b128c09f BB |
1219 | /* |
1220 | * Execute the I/O pipeline until one of the following occurs: | |
1221 | * (1) the I/O completes; (2) the pipeline stalls waiting for | |
1222 | * dependent child I/Os; (3) the I/O issues, so we're waiting | |
1223 | * for an I/O completion interrupt; (4) the I/O is delegated by | |
1224 | * vdev-level caching or aggregation; (5) the I/O is deferred | |
1225 | * due to vdev-level queueing; (6) the I/O is handed off to | |
1226 | * another thread. In all cases, the pipeline stops whenever | |
1227 | * there's no CPU work; it never burns a thread in cv_wait(). | |
1228 | * | |
1229 | * There's no locking on io_stage because there's no legitimate way | |
1230 | * for multiple threads to be attempting to process the same I/O. | |
1231 | */ | |
428870ff | 1232 | static zio_pipe_stage_t *zio_pipeline[]; |
34dc7c2f | 1233 | |
da6b4005 NB |
1234 | /* |
1235 | * zio_execute() is a wrapper around the static function | |
1236 | * __zio_execute() so that we can force __zio_execute() to be | |
1237 | * inlined. This reduces stack overhead which is important | |
1238 | * because __zio_execute() is called recursively in several zio | |
1239 | * code paths. zio_execute() itself cannot be inlined because | |
1240 | * it is externally visible. | |
1241 | */ | |
b128c09f BB |
1242 | void |
1243 | zio_execute(zio_t *zio) | |
da6b4005 NB |
1244 | { |
1245 | __zio_execute(zio); | |
1246 | } | |
1247 | ||
1248 | __attribute__((always_inline)) | |
1249 | static inline void | |
1250 | __zio_execute(zio_t *zio) | |
b128c09f BB |
1251 | { |
1252 | zio->io_executor = curthread; | |
34dc7c2f | 1253 | |
b128c09f | 1254 | while (zio->io_stage < ZIO_STAGE_DONE) { |
428870ff BB |
1255 | enum zio_stage pipeline = zio->io_pipeline; |
1256 | enum zio_stage stage = zio->io_stage; | |
91579709 | 1257 | dsl_pool_t *dp; |
2fac4c2a | 1258 | boolean_t cut; |
b128c09f | 1259 | int rv; |
34dc7c2f | 1260 | |
b128c09f | 1261 | ASSERT(!MUTEX_HELD(&zio->io_lock)); |
428870ff BB |
1262 | ASSERT(ISP2(stage)); |
1263 | ASSERT(zio->io_stall == NULL); | |
34dc7c2f | 1264 | |
428870ff BB |
1265 | do { |
1266 | stage <<= 1; | |
1267 | } while ((stage & pipeline) == 0); | |
b128c09f BB |
1268 | |
1269 | ASSERT(stage <= ZIO_STAGE_DONE); | |
34dc7c2f | 1270 | |
91579709 | 1271 | dp = spa_get_dsl(zio->io_spa); |
2fac4c2a BB |
1272 | cut = (stage == ZIO_STAGE_VDEV_IO_START) ? |
1273 | zio_requeue_io_start_cut_in_line : B_FALSE; | |
1274 | ||
34dc7c2f | 1275 | /* |
b128c09f BB |
1276 | * If we are in interrupt context and this pipeline stage |
1277 | * will grab a config lock that is held across I/O, | |
428870ff BB |
1278 | * or may wait for an I/O that needs an interrupt thread |
1279 | * to complete, issue async to avoid deadlock. | |
1280 | * | |
1281 | * For VDEV_IO_START, we cut in line so that the io will | |
1282 | * be sent to disk promptly. | |
34dc7c2f | 1283 | */ |
91579709 BB |
1284 | if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL && |
1285 | zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) { | |
1286 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); | |
1287 | return; | |
1288 | } | |
1289 | ||
4cec9b2d | 1290 | #ifdef _KERNEL |
91579709 BB |
1291 | /* |
1292 | * If we are executing in the context of the tx_sync_thread, |
1293 | * or we are performing pool initialization outside of a |
1294 | * zio_taskq[ZIO_TASKQ_ISSUE] context, then issue the zio |
1295 | * async to minimize stack usage for these deep call paths. |
1296 | */ | |
1297 | if ((dp && curthread == dp->dp_tx.tx_sync_thread) || | |
1298 | (dp && spa_is_initializing(dp->dp_spa) && | |
1299 | !zio_taskq_member(zio, ZIO_TASKQ_ISSUE))) { | |
428870ff | 1300 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut); |
b128c09f | 1301 | return; |
34dc7c2f | 1302 | } |
4cec9b2d | 1303 | #endif |
34dc7c2f | 1304 | |
b128c09f | 1305 | zio->io_stage = stage; |
428870ff | 1306 | rv = zio_pipeline[highbit(stage) - 1](zio); |
34dc7c2f | 1307 | |
b128c09f BB |
1308 | if (rv == ZIO_PIPELINE_STOP) |
1309 | return; | |
34dc7c2f | 1310 | |
b128c09f BB |
1311 | ASSERT(rv == ZIO_PIPELINE_CONTINUE); |
1312 | } | |
34dc7c2f BB |
1313 | } |
1314 | ||
da6b4005 | 1315 | |
b128c09f BB |
1316 | /* |
1317 | * ========================================================================== | |
1318 | * Initiate I/O, either sync or async | |
1319 | * ========================================================================== | |
1320 | */ | |
1321 | int | |
1322 | zio_wait(zio_t *zio) | |
34dc7c2f | 1323 | { |
b128c09f | 1324 | int error; |
34dc7c2f | 1325 | |
b128c09f BB |
1326 | ASSERT(zio->io_stage == ZIO_STAGE_OPEN); |
1327 | ASSERT(zio->io_executor == NULL); | |
34dc7c2f | 1328 | |
b128c09f | 1329 | zio->io_waiter = curthread; |
34dc7c2f | 1330 | |
da6b4005 | 1331 | __zio_execute(zio); |
34dc7c2f | 1332 | |
b128c09f | 1333 | mutex_enter(&zio->io_lock); |
72f53c56 | 1334 | while (zio->io_executor != NULL) |
72938d69 | 1335 | cv_wait_io(&zio->io_cv, &zio->io_lock); |
b128c09f | 1336 | mutex_exit(&zio->io_lock); |
34dc7c2f | 1337 | |
b128c09f BB |
1338 | error = zio->io_error; |
1339 | zio_destroy(zio); | |
34dc7c2f | 1340 | |
b128c09f BB |
1341 | return (error); |
1342 | } | |
34dc7c2f | 1343 | |
b128c09f BB |
1344 | void |
1345 | zio_nowait(zio_t *zio) | |
1346 | { | |
1347 | ASSERT(zio->io_executor == NULL); | |
34dc7c2f | 1348 | |
d164b209 BB |
1349 | if (zio->io_child_type == ZIO_CHILD_LOGICAL && |
1350 | zio_unique_parent(zio) == NULL) { | |
34dc7c2f | 1351 | /* |
b128c09f | 1352 | * This is a logical async I/O with no parent to wait for it. |
9babb374 BB |
1353 | * We add it to the spa_async_root_zio "Godfather" I/O which |
1354 | * will ensure they complete prior to unloading the pool. | |
34dc7c2f | 1355 | */ |
b128c09f | 1356 | spa_t *spa = zio->io_spa; |
9babb374 BB |
1357 | |
1358 | zio_add_child(spa->spa_async_zio_root, zio); | |
b128c09f | 1359 | } |
34dc7c2f | 1360 | |
da6b4005 | 1361 | __zio_execute(zio); |
b128c09f | 1362 | } |
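/*
 * Usage sketch (editor's note): a caller that needs the result issues
 * synchronously through zio_wait(); fire-and-forget I/O goes through
 * zio_nowait(). For example ('spa', 'bp', 'buf' and 'zb' are assumed
 * to be set up by the caller):
 *
 *	error = zio_wait(zio_read(NULL, spa, bp, buf, BP_GET_PSIZE(bp),
 *	    NULL, NULL, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb));
 */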
34dc7c2f | 1363 | |
b128c09f BB |
1364 | /* |
1365 | * ========================================================================== | |
1366 | * Reexecute or suspend/resume failed I/O | |
1367 | * ========================================================================== | |
1368 | */ | |
34dc7c2f | 1369 | |
b128c09f BB |
1370 | static void |
1371 | zio_reexecute(zio_t *pio) | |
1372 | { | |
d164b209 | 1373 | zio_t *cio, *cio_next; |
d6320ddb | 1374 | int c, w; |
d164b209 BB |
1375 | |
1376 | ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); | |
1377 | ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); | |
9babb374 BB |
1378 | ASSERT(pio->io_gang_leader == NULL); |
1379 | ASSERT(pio->io_gang_tree == NULL); | |
34dc7c2f | 1380 | |
b128c09f BB |
1381 | pio->io_flags = pio->io_orig_flags; |
1382 | pio->io_stage = pio->io_orig_stage; | |
1383 | pio->io_pipeline = pio->io_orig_pipeline; | |
1384 | pio->io_reexecute = 0; | |
1385 | pio->io_error = 0; | |
d6320ddb | 1386 | for (w = 0; w < ZIO_WAIT_TYPES; w++) |
d164b209 | 1387 | pio->io_state[w] = 0; |
d6320ddb | 1388 | for (c = 0; c < ZIO_CHILD_TYPES; c++) |
b128c09f | 1389 | pio->io_child_error[c] = 0; |
34dc7c2f | 1390 | |
428870ff BB |
1391 | if (IO_IS_ALLOCATING(pio)) |
1392 | BP_ZERO(pio->io_bp); | |
34dc7c2f | 1393 | |
b128c09f BB |
1394 | /* |
1395 | * As we reexecute pio's children, new children could be created. | |
d164b209 | 1396 | * New children go to the head of pio's io_child_list, however, |
b128c09f | 1397 | * so we will (correctly) not reexecute them. The key is that |
d164b209 BB |
1398 | * the remainder of pio's io_child_list, from 'cio_next' onward, |
1399 | * cannot be affected by any side effects of reexecuting 'cio'. | |
b128c09f | 1400 | */ |
d164b209 BB |
1401 | for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) { |
1402 | cio_next = zio_walk_children(pio); | |
b128c09f | 1403 | mutex_enter(&pio->io_lock); |
d6320ddb | 1404 | for (w = 0; w < ZIO_WAIT_TYPES; w++) |
d164b209 | 1405 | pio->io_children[cio->io_child_type][w]++; |
b128c09f | 1406 | mutex_exit(&pio->io_lock); |
d164b209 | 1407 | zio_reexecute(cio); |
34dc7c2f | 1408 | } |
34dc7c2f | 1409 | |
b128c09f BB |
1410 | /* |
1411 | * Now that all children have been reexecuted, execute the parent. | |
9babb374 BB |
1412 | * We don't reexecute "The Godfather" I/O here as it's the |
1413 | * responsibility of the caller to wait on him. | |
b128c09f | 1414 | */ |
9babb374 | 1415 | if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) |
da6b4005 | 1416 | __zio_execute(pio); |
34dc7c2f BB |
1417 | } |
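The child-walk in zio_reexecute() relies on a small but important pattern: latch the next list entry before acting on the current one, and rely on new entries being prepended to the head so an in-progress walk never sees them. A self-contained sketch of that pattern, using purely illustrative types (node_t, visit_fn_t) that are not part of zio.c:

#include <stddef.h>

typedef struct node {
	struct node *next;
} node_t;

typedef void visit_fn_t(node_t *);

/*
 * Walk a singly linked list whose head may gain new entries while nodes
 * are being visited.  'next' is latched before visit() runs, so the walk
 * stays valid even if visit() relinks the current node; entries prepended
 * to the head during the walk are (deliberately) never visited.
 */
static void
walk_existing(node_t *head, visit_fn_t *visit)
{
	node_t *cur, *next;

	for (cur = head; cur != NULL; cur = next) {
		next = cur->next;
		visit(cur);
	}
}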
1418 | ||
b128c09f BB |
1419 | void |
1420 | zio_suspend(spa_t *spa, zio_t *zio) | |
34dc7c2f | 1421 | { |
b128c09f BB |
1422 | if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC) |
1423 | fm_panic("Pool '%s' has encountered an uncorrectable I/O " | |
1424 | "failure and the failure mode property for this pool " | |
1425 | "is set to panic.", spa_name(spa)); | |
34dc7c2f | 1426 | |
b128c09f | 1427 | zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0); |
34dc7c2f | 1428 | |
b128c09f | 1429 | mutex_enter(&spa->spa_suspend_lock); |
34dc7c2f | 1430 | |
b128c09f | 1431 | if (spa->spa_suspend_zio_root == NULL) |
9babb374 BB |
1432 | spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL, |
1433 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | | |
1434 | ZIO_FLAG_GODFATHER); | |
34dc7c2f | 1435 | |
b128c09f | 1436 | spa->spa_suspended = B_TRUE; |
34dc7c2f | 1437 | |
b128c09f | 1438 | if (zio != NULL) { |
9babb374 | 1439 | ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); |
b128c09f BB |
1440 | ASSERT(zio != spa->spa_suspend_zio_root); |
1441 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
d164b209 | 1442 | ASSERT(zio_unique_parent(zio) == NULL); |
b128c09f BB |
1443 | ASSERT(zio->io_stage == ZIO_STAGE_DONE); |
1444 | zio_add_child(spa->spa_suspend_zio_root, zio); | |
1445 | } | |
34dc7c2f | 1446 | |
b128c09f BB |
1447 | mutex_exit(&spa->spa_suspend_lock); |
1448 | } | |
34dc7c2f | 1449 | |
9babb374 | 1450 | int |
b128c09f BB |
1451 | zio_resume(spa_t *spa) |
1452 | { | |
9babb374 | 1453 | zio_t *pio; |
34dc7c2f BB |
1454 | |
1455 | /* | |
b128c09f | 1456 | * Reexecute all previously suspended i/o. |
34dc7c2f | 1457 | */ |
b128c09f BB |
1458 | mutex_enter(&spa->spa_suspend_lock); |
1459 | spa->spa_suspended = B_FALSE; | |
1460 | cv_broadcast(&spa->spa_suspend_cv); | |
1461 | pio = spa->spa_suspend_zio_root; | |
1462 | spa->spa_suspend_zio_root = NULL; | |
1463 | mutex_exit(&spa->spa_suspend_lock); | |
1464 | ||
1465 | if (pio == NULL) | |
9babb374 | 1466 | return (0); |
34dc7c2f | 1467 | |
9babb374 BB |
1468 | zio_reexecute(pio); |
1469 | return (zio_wait(pio)); | |
b128c09f BB |
1470 | } |
1471 | ||
1472 | void | |
1473 | zio_resume_wait(spa_t *spa) | |
1474 | { | |
1475 | mutex_enter(&spa->spa_suspend_lock); | |
1476 | while (spa_suspended(spa)) | |
1477 | cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock); | |
1478 | mutex_exit(&spa->spa_suspend_lock); | |
34dc7c2f BB |
1479 | } |
1480 | ||
1481 | /* | |
1482 | * ========================================================================== | |
b128c09f BB |
1483 | * Gang blocks. |
1484 | * | |
1485 | * A gang block is a collection of small blocks that looks to the DMU | |
1486 | * like one large block. When zio_dva_allocate() cannot find a block | |
1487 | * of the requested size, due to either severe fragmentation or the pool | |
1488 | * being nearly full, it calls zio_write_gang_block() to construct the | |
1489 | * block from smaller fragments. | |
1490 | * | |
1491 | * A gang block consists of a gang header (zio_gbh_phys_t) and up to | |
1492 | * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like | |
1493 | * an indirect block: it's an array of block pointers. It consumes | |
1494 | * only one sector and hence is allocatable regardless of fragmentation. | |
1495 | * The gang header's bps point to its gang members, which hold the data. | |
1496 | * | |
1497 | * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg> | |
1498 | * as the verifier to ensure uniqueness of the SHA256 checksum. | |
1499 | * Critically, the gang block bp's blk_cksum is the checksum of the data, | |
1500 | * not the gang header. This ensures that data block signatures (needed for | |
1501 | * deduplication) are independent of how the block is physically stored. | |
1502 | * | |
1503 | * Gang blocks can be nested: a gang member may itself be a gang block. | |
1504 | * Thus every gang block is a tree in which root and all interior nodes are | |
1505 | * gang headers, and the leaves are normal blocks that contain user data. | |
1506 | * The root of the gang tree is called the gang leader. | |
1507 | * | |
1508 | * To perform any operation (read, rewrite, free, claim) on a gang block, | |
1509 | * zio_gang_assemble() first assembles the gang tree (minus data leaves) | |
1510 | * in the io_gang_tree field of the original logical i/o by recursively | |
1511 | * reading the gang leader and all gang headers below it. This yields | |
1512 | * an in-core tree containing the contents of every gang header and the | |
1513 | * bps for every constituent of the gang block. | |
1514 | * | |
1515 | * With the gang tree now assembled, zio_gang_issue() just walks the gang tree | |
1516 | * and invokes a callback on each bp. To free a gang block, zio_gang_issue() | |
1517 | * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp. | |
1518 | * zio_claim_gang() provides a similarly trivial wrapper for zio_claim(). | |
1519 | * zio_read_gang() is a wrapper around zio_read() that omits reading gang | |
1520 | * headers, since we already have those in io_gang_tree. zio_rewrite_gang() | |
1521 | * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite() | |
1522 | * of the gang header plus zio_checksum_compute() of the data to update the | |
1523 | * gang header's blk_cksum as described above. | |
1524 | * | |
1525 | * The two-phase assemble/issue model solves the problem of partial failure -- | |
1526 | * what if you'd freed part of a gang block but then couldn't read the | |
1527 | * gang header for another part? Assembling the entire gang tree first | |
1528 | * ensures that all the necessary gang header I/O has succeeded before | |
1529 | * starting the actual work of free, claim, or write. Once the gang tree | |
1530 | * is assembled, free and claim are in-memory operations that cannot fail. | |
1531 | * | |
1532 | * In the event that a gang write fails, zio_dva_unallocate() walks the | |
1533 | * gang tree to immediately free (i.e. insert back into the space map) | |
1534 | * everything we've allocated. This ensures that we don't get ENOSPC | |
1535 | * errors during repeated suspend/resume cycles due to a flaky device. | |
1536 | * | |
1537 | * Gang rewrites only happen during sync-to-convergence. If we can't assemble | |
1538 | * the gang tree, we won't modify the block, so we can safely defer the free | |
1539 | * (knowing that the block is still intact). If we *can* assemble the gang | |
1540 | * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free | |
1541 | * each constituent bp and we can allocate a new block on the next sync pass. | |
1542 | * | |
1543 | * In all cases, the gang tree allows complete recovery from partial failure. | |
34dc7c2f BB |
1544 | * ========================================================================== |
1545 | */ | |
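As a rough, self-contained illustration of the assemble-then-issue model described above (the types and helper below are invented for the example and are not the zio.c data structures): once every gang header has been read into an in-core tree, any operation reduces to a depth-first walk that applies one callback per block pointer, with no further header I/O.

#define NBLKPTRS 3	/* stands in for SPA_GBH_NBLKPTRS */

typedef struct blkptr_stub { int unused; } blkptr_stub_t;

typedef struct gang_node {
	struct gang_node *child[NBLKPTRS];	/* non-NULL => that bp is another gang header */
	blkptr_stub_t bp[NBLKPTRS];		/* bps copied out of the in-core gang header */
} gang_node_t;

typedef void gang_cb_t(blkptr_stub_t *, void *);

/*
 * Depth-first walk of an already-assembled gang tree: interior nodes are
 * gang headers, leaves are ordinary data blocks.  The callback could be a
 * free, claim, read, or rewrite operation, mirroring zio_gang_issue_func[].
 */
static void
visit_gang_tree(gang_node_t *gn, gang_cb_t *cb, void *arg)
{
	int g;

	for (g = 0; g < NBLKPTRS; g++) {
		cb(&gn->bp[g], arg);
		if (gn->child[g] != NULL)
			visit_gang_tree(gn->child[g], cb, arg);
	}
}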
b128c09f BB |
1546 | |
1547 | static zio_t * | |
1548 | zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) | |
34dc7c2f | 1549 | { |
b128c09f BB |
1550 | if (gn != NULL) |
1551 | return (pio); | |
34dc7c2f | 1552 | |
b128c09f BB |
1553 | return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp), |
1554 | NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), | |
1555 | &pio->io_bookmark)); | |
1556 | } | |
1557 | ||
1558 | zio_t * | |
1559 | zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) | |
1560 | { | |
1561 | zio_t *zio; | |
1562 | ||
1563 | if (gn != NULL) { | |
1564 | zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, | |
1565 | gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL, pio->io_priority, | |
1566 | ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); | |
34dc7c2f | 1567 | /* |
b128c09f BB |
1568 | * As we rewrite each gang header, the pipeline will compute |
1569 | * a new gang block header checksum for it; but no one will | |
1570 | * compute a new data checksum, so we do that here. The one | |
1571 | * exception is the gang leader: the pipeline already computed | |
1572 | * its data checksum because that stage precedes gang assembly. | |
1573 | * (Presently, nothing actually uses interior data checksums; | |
1574 | * this is just good hygiene.) | |
34dc7c2f | 1575 | */ |
9babb374 | 1576 | if (gn != pio->io_gang_leader->io_gang_tree) { |
b128c09f BB |
1577 | zio_checksum_compute(zio, BP_GET_CHECKSUM(bp), |
1578 | data, BP_GET_PSIZE(bp)); | |
1579 | } | |
428870ff BB |
1580 | /* |
1581 | * If we are here to damage data for testing purposes, | |
1582 | * leave the GBH alone so that we can detect the damage. | |
1583 | */ | |
1584 | if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE) | |
1585 | zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; | |
34dc7c2f | 1586 | } else { |
b128c09f BB |
1587 | zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp, |
1588 | data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority, | |
1589 | ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); | |
34dc7c2f BB |
1590 | } |
1591 | ||
b128c09f BB |
1592 | return (zio); |
1593 | } | |
34dc7c2f | 1594 | |
b128c09f BB |
1595 | /* ARGSUSED */ |
1596 | zio_t * | |
1597 | zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) | |
1598 | { | |
428870ff BB |
1599 | return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp, |
1600 | ZIO_GANG_CHILD_FLAGS(pio))); | |
34dc7c2f BB |
1601 | } |
1602 | ||
b128c09f BB |
1603 | /* ARGSUSED */ |
1604 | zio_t * | |
1605 | zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data) | |
34dc7c2f | 1606 | { |
b128c09f BB |
1607 | return (zio_claim(pio, pio->io_spa, pio->io_txg, bp, |
1608 | NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio))); | |
1609 | } | |
1610 | ||
1611 | static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = { | |
1612 | NULL, | |
1613 | zio_read_gang, | |
1614 | zio_rewrite_gang, | |
1615 | zio_free_gang, | |
1616 | zio_claim_gang, | |
1617 | NULL | |
1618 | }; | |
34dc7c2f | 1619 | |
b128c09f | 1620 | static void zio_gang_tree_assemble_done(zio_t *zio); |
34dc7c2f | 1621 | |
b128c09f BB |
1622 | static zio_gang_node_t * |
1623 | zio_gang_node_alloc(zio_gang_node_t **gnpp) | |
1624 | { | |
1625 | zio_gang_node_t *gn; | |
34dc7c2f | 1626 | |
b128c09f | 1627 | ASSERT(*gnpp == NULL); |
34dc7c2f | 1628 | |
691f6ac4 | 1629 | gn = kmem_zalloc(sizeof (*gn), KM_PUSHPAGE); |
b128c09f BB |
1630 | gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE); |
1631 | *gnpp = gn; | |
34dc7c2f | 1632 | |
b128c09f | 1633 | return (gn); |
34dc7c2f BB |
1634 | } |
1635 | ||
34dc7c2f | 1636 | static void |
b128c09f | 1637 | zio_gang_node_free(zio_gang_node_t **gnpp) |
34dc7c2f | 1638 | { |
b128c09f | 1639 | zio_gang_node_t *gn = *gnpp; |
d6320ddb | 1640 | int g; |
34dc7c2f | 1641 | |
d6320ddb | 1642 | for (g = 0; g < SPA_GBH_NBLKPTRS; g++) |
b128c09f BB |
1643 | ASSERT(gn->gn_child[g] == NULL); |
1644 | ||
1645 | zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); | |
1646 | kmem_free(gn, sizeof (*gn)); | |
1647 | *gnpp = NULL; | |
34dc7c2f BB |
1648 | } |
1649 | ||
b128c09f BB |
1650 | static void |
1651 | zio_gang_tree_free(zio_gang_node_t **gnpp) | |
34dc7c2f | 1652 | { |
b128c09f | 1653 | zio_gang_node_t *gn = *gnpp; |
d6320ddb | 1654 | int g; |
34dc7c2f | 1655 | |
b128c09f BB |
1656 | if (gn == NULL) |
1657 | return; | |
34dc7c2f | 1658 | |
d6320ddb | 1659 | for (g = 0; g < SPA_GBH_NBLKPTRS; g++) |
b128c09f | 1660 | zio_gang_tree_free(&gn->gn_child[g]); |
34dc7c2f | 1661 | |
b128c09f | 1662 | zio_gang_node_free(gnpp); |
34dc7c2f BB |
1663 | } |
1664 | ||
b128c09f | 1665 | static void |
9babb374 | 1666 | zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp) |
34dc7c2f | 1667 | { |
b128c09f BB |
1668 | zio_gang_node_t *gn = zio_gang_node_alloc(gnpp); |
1669 | ||
9babb374 | 1670 | ASSERT(gio->io_gang_leader == gio); |
b128c09f | 1671 | ASSERT(BP_IS_GANG(bp)); |
34dc7c2f | 1672 | |
9babb374 | 1673 | zio_nowait(zio_read(gio, gio->io_spa, bp, gn->gn_gbh, |
b128c09f | 1674 | SPA_GANGBLOCKSIZE, zio_gang_tree_assemble_done, gn, |
9babb374 | 1675 | gio->io_priority, ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark)); |
b128c09f | 1676 | } |
34dc7c2f | 1677 | |
b128c09f BB |
1678 | static void |
1679 | zio_gang_tree_assemble_done(zio_t *zio) | |
1680 | { | |
9babb374 | 1681 | zio_t *gio = zio->io_gang_leader; |
b128c09f BB |
1682 | zio_gang_node_t *gn = zio->io_private; |
1683 | blkptr_t *bp = zio->io_bp; | |
d6320ddb | 1684 | int g; |
34dc7c2f | 1685 | |
9babb374 | 1686 | ASSERT(gio == zio_unique_parent(zio)); |
428870ff | 1687 | ASSERT(zio->io_child_count == 0); |
34dc7c2f | 1688 | |
b128c09f BB |
1689 | if (zio->io_error) |
1690 | return; | |
34dc7c2f | 1691 | |
b128c09f BB |
1692 | if (BP_SHOULD_BYTESWAP(bp)) |
1693 | byteswap_uint64_array(zio->io_data, zio->io_size); | |
34dc7c2f | 1694 | |
b128c09f BB |
1695 | ASSERT(zio->io_data == gn->gn_gbh); |
1696 | ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); | |
428870ff | 1697 | ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); |
34dc7c2f | 1698 | |
d6320ddb | 1699 | for (g = 0; g < SPA_GBH_NBLKPTRS; g++) { |
b128c09f BB |
1700 | blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; |
1701 | if (!BP_IS_GANG(gbp)) | |
1702 | continue; | |
9babb374 | 1703 | zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]); |
b128c09f | 1704 | } |
34dc7c2f BB |
1705 | } |
1706 | ||
b128c09f BB |
1707 | static void |
1708 | zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data) | |
34dc7c2f | 1709 | { |
9babb374 | 1710 | zio_t *gio = pio->io_gang_leader; |
b128c09f | 1711 | zio_t *zio; |
d6320ddb | 1712 | int g; |
34dc7c2f | 1713 | |
b128c09f | 1714 | ASSERT(BP_IS_GANG(bp) == !!gn); |
9babb374 BB |
1715 | ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); |
1716 | ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree); | |
34dc7c2f | 1717 | |
b128c09f BB |
1718 | /* |
1719 | * If you're a gang header, your data is in gn->gn_gbh. | |
1720 | * If you're a gang member, your data is in 'data' and gn == NULL. | |
1721 | */ | |
9babb374 | 1722 | zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data); |
34dc7c2f | 1723 | |
b128c09f | 1724 | if (gn != NULL) { |
428870ff | 1725 | ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); |
34dc7c2f | 1726 | |
d6320ddb | 1727 | for (g = 0; g < SPA_GBH_NBLKPTRS; g++) { |
b128c09f BB |
1728 | blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; |
1729 | if (BP_IS_HOLE(gbp)) | |
1730 | continue; | |
1731 | zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data); | |
1732 | data = (char *)data + BP_GET_PSIZE(gbp); | |
1733 | } | |
34dc7c2f BB |
1734 | } |
1735 | ||
9babb374 BB |
1736 | if (gn == gio->io_gang_tree) |
1737 | ASSERT3P((char *)gio->io_data + gio->io_size, ==, data); | |
34dc7c2f | 1738 | |
b128c09f BB |
1739 | if (zio != pio) |
1740 | zio_nowait(zio); | |
34dc7c2f BB |
1741 | } |
1742 | ||
1743 | static int | |
b128c09f | 1744 | zio_gang_assemble(zio_t *zio) |
34dc7c2f | 1745 | { |
b128c09f | 1746 | blkptr_t *bp = zio->io_bp; |
34dc7c2f | 1747 | |
9babb374 BB |
1748 | ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL); |
1749 | ASSERT(zio->io_child_type > ZIO_CHILD_GANG); | |
1750 | ||
1751 | zio->io_gang_leader = zio; | |
34dc7c2f | 1752 | |
b128c09f | 1753 | zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree); |
34dc7c2f BB |
1754 | |
1755 | return (ZIO_PIPELINE_CONTINUE); | |
1756 | } | |
1757 | ||
1758 | static int | |
b128c09f | 1759 | zio_gang_issue(zio_t *zio) |
34dc7c2f | 1760 | { |
b128c09f | 1761 | blkptr_t *bp = zio->io_bp; |
34dc7c2f | 1762 | |
b128c09f BB |
1763 | if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE)) |
1764 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f | 1765 | |
9babb374 BB |
1766 | ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio); |
1767 | ASSERT(zio->io_child_type > ZIO_CHILD_GANG); | |
34dc7c2f | 1768 | |
b128c09f | 1769 | if (zio->io_child_error[ZIO_CHILD_GANG] == 0) |
9babb374 | 1770 | zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data); |
b128c09f | 1771 | else |
9babb374 | 1772 | zio_gang_tree_free(&zio->io_gang_tree); |
34dc7c2f | 1773 | |
b128c09f | 1774 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; |
34dc7c2f BB |
1775 | |
1776 | return (ZIO_PIPELINE_CONTINUE); | |
1777 | } | |
1778 | ||
1779 | static void | |
b128c09f | 1780 | zio_write_gang_member_ready(zio_t *zio) |
34dc7c2f | 1781 | { |
d164b209 | 1782 | zio_t *pio = zio_unique_parent(zio); |
1fde1e37 | 1783 | ASSERTV(zio_t *gio = zio->io_gang_leader;) |
34dc7c2f BB |
1784 | dva_t *cdva = zio->io_bp->blk_dva; |
1785 | dva_t *pdva = pio->io_bp->blk_dva; | |
1786 | uint64_t asize; | |
d6320ddb | 1787 | int d; |
34dc7c2f | 1788 | |
b128c09f BB |
1789 | if (BP_IS_HOLE(zio->io_bp)) |
1790 | return; | |
1791 | ||
1792 | ASSERT(BP_IS_HOLE(&zio->io_bp_orig)); | |
1793 | ||
1794 | ASSERT(zio->io_child_type == ZIO_CHILD_GANG); | |
428870ff BB |
1795 | ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies); |
1796 | ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); | |
1797 | ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp)); | |
34dc7c2f | 1798 | ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); |
34dc7c2f BB |
1799 | |
1800 | mutex_enter(&pio->io_lock); | |
d6320ddb | 1801 | for (d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { |
34dc7c2f BB |
1802 | ASSERT(DVA_GET_GANG(&pdva[d])); |
1803 | asize = DVA_GET_ASIZE(&pdva[d]); | |
1804 | asize += DVA_GET_ASIZE(&cdva[d]); | |
1805 | DVA_SET_ASIZE(&pdva[d], asize); | |
1806 | } | |
1807 | mutex_exit(&pio->io_lock); | |
1808 | } | |
1809 | ||
1810 | static int | |
b128c09f | 1811 | zio_write_gang_block(zio_t *pio) |
34dc7c2f | 1812 | { |
b128c09f BB |
1813 | spa_t *spa = pio->io_spa; |
1814 | blkptr_t *bp = pio->io_bp; | |
9babb374 | 1815 | zio_t *gio = pio->io_gang_leader; |
b128c09f BB |
1816 | zio_t *zio; |
1817 | zio_gang_node_t *gn, **gnpp; | |
34dc7c2f | 1818 | zio_gbh_phys_t *gbh; |
b128c09f BB |
1819 | uint64_t txg = pio->io_txg; |
1820 | uint64_t resid = pio->io_size; | |
1821 | uint64_t lsize; | |
428870ff BB |
1822 | int copies = gio->io_prop.zp_copies; |
1823 | int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); | |
b128c09f | 1824 | zio_prop_t zp; |
d6320ddb | 1825 | int g, error; |
34dc7c2f | 1826 | |
428870ff BB |
1827 | error = metaslab_alloc(spa, spa_normal_class(spa), SPA_GANGBLOCKSIZE, |
1828 | bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, | |
b128c09f | 1829 | METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER); |
34dc7c2f | 1830 | if (error) { |
b128c09f | 1831 | pio->io_error = error; |
34dc7c2f BB |
1832 | return (ZIO_PIPELINE_CONTINUE); |
1833 | } | |
1834 | ||
9babb374 BB |
1835 | if (pio == gio) { |
1836 | gnpp = &gio->io_gang_tree; | |
b128c09f BB |
1837 | } else { |
1838 | gnpp = pio->io_private; | |
1839 | ASSERT(pio->io_ready == zio_write_gang_member_ready); | |
34dc7c2f BB |
1840 | } |
1841 | ||
b128c09f BB |
1842 | gn = zio_gang_node_alloc(gnpp); |
1843 | gbh = gn->gn_gbh; | |
1844 | bzero(gbh, SPA_GANGBLOCKSIZE); | |
34dc7c2f | 1845 | |
b128c09f BB |
1846 | /* |
1847 | * Create the gang header. | |
1848 | */ | |
1849 | zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE, NULL, NULL, | |
1850 | pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark); | |
34dc7c2f | 1851 | |
b128c09f BB |
1852 | /* |
1853 | * Create and nowait the gang children. | |
1854 | */ | |
d6320ddb | 1855 | for (g = 0; resid != 0; resid -= lsize, g++) { |
b128c09f BB |
1856 | lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), |
1857 | SPA_MINBLOCKSIZE); | |
1858 | ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); | |
1859 | ||
9babb374 | 1860 | zp.zp_checksum = gio->io_prop.zp_checksum; |
b128c09f BB |
1861 | zp.zp_compress = ZIO_COMPRESS_OFF; |
1862 | zp.zp_type = DMU_OT_NONE; | |
1863 | zp.zp_level = 0; | |
428870ff BB |
1864 | zp.zp_copies = gio->io_prop.zp_copies; |
1865 | zp.zp_dedup = 0; | |
1866 | zp.zp_dedup_verify = 0; | |
b128c09f BB |
1867 | |
1868 | zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g], | |
1869 | (char *)pio->io_data + (pio->io_size - resid), lsize, &zp, | |
1870 | zio_write_gang_member_ready, NULL, &gn->gn_child[g], | |
1871 | pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio), | |
1872 | &pio->io_bookmark)); | |
1873 | } | |
34dc7c2f BB |
1874 | |
1875 | /* | |
b128c09f | 1876 | * Set pio's pipeline to just wait for zio to finish. |
34dc7c2f | 1877 | */ |
b128c09f BB |
1878 | pio->io_pipeline = ZIO_INTERLOCK_PIPELINE; |
1879 | ||
920dd524 ED |
1880 | /* |
1881 | * We didn't allocate this bp, so make sure it doesn't get unmarked. | |
1882 | */ | |
1883 | pio->io_flags &= ~ZIO_FLAG_FASTWRITE; | |
1884 | ||
b128c09f BB |
1885 | zio_nowait(zio); |
1886 | ||
1887 | return (ZIO_PIPELINE_CONTINUE); | |
34dc7c2f BB |
1888 | } |
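The size-splitting loop in zio_write_gang_block() divides whatever is left evenly among the remaining gang slots, rounding each piece up to SPA_MINBLOCKSIZE. A standalone sketch of that arithmetic (local macros stand in for the kernel's P2ROUNDUP and the SPA constants): a 128K residual splits into 44032 + 43520 + 43520 bytes across the three members.

#include <stdio.h>
#include <stdint.h>

#define GBH_NBLKPTRS	3	/* mirrors SPA_GBH_NBLKPTRS */
#define MINBLOCKSIZE	512	/* mirrors SPA_MINBLOCKSIZE */
#define P2ROUNDUP(x, a)	((((x) - 1) | ((a) - 1)) + 1)

int
main(void)
{
	uint64_t resid = 131072;	/* pio->io_size for a 128K gang write */
	uint64_t lsize;
	int g;

	for (g = 0; resid != 0; resid -= lsize, g++) {
		lsize = P2ROUNDUP(resid / (GBH_NBLKPTRS - g), MINBLOCKSIZE);
		printf("gang member %d: %llu bytes\n", g,
		    (unsigned long long)lsize);
	}
	return (0);
}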
1889 | ||
1890 | /* | |
1891 | * ========================================================================== | |
428870ff | 1892 | * Dedup |
34dc7c2f BB |
1893 | * ========================================================================== |
1894 | */ | |
428870ff BB |
1895 | static void |
1896 | zio_ddt_child_read_done(zio_t *zio) | |
1897 | { | |
1898 | blkptr_t *bp = zio->io_bp; | |
1899 | ddt_entry_t *dde = zio->io_private; | |
1900 | ddt_phys_t *ddp; | |
1901 | zio_t *pio = zio_unique_parent(zio); | |
1902 | ||
1903 | mutex_enter(&pio->io_lock); | |
1904 | ddp = ddt_phys_select(dde, bp); | |
1905 | if (zio->io_error == 0) | |
1906 | ddt_phys_clear(ddp); /* this ddp doesn't need repair */ | |
1907 | if (zio->io_error == 0 && dde->dde_repair_data == NULL) | |
1908 | dde->dde_repair_data = zio->io_data; | |
1909 | else | |
1910 | zio_buf_free(zio->io_data, zio->io_size); | |
1911 | mutex_exit(&pio->io_lock); | |
1912 | } | |
1913 | ||
1914 | static int | |
1915 | zio_ddt_read_start(zio_t *zio) | |
1916 | { | |
1917 | blkptr_t *bp = zio->io_bp; | |
d6320ddb | 1918 | int p; |
428870ff BB |
1919 | |
1920 | ASSERT(BP_GET_DEDUP(bp)); | |
1921 | ASSERT(BP_GET_PSIZE(bp) == zio->io_size); | |
1922 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
1923 | ||
1924 | if (zio->io_child_error[ZIO_CHILD_DDT]) { | |
1925 | ddt_t *ddt = ddt_select(zio->io_spa, bp); | |
1926 | ddt_entry_t *dde = ddt_repair_start(ddt, bp); | |
1927 | ddt_phys_t *ddp = dde->dde_phys; | |
1928 | ddt_phys_t *ddp_self = ddt_phys_select(dde, bp); | |
1929 | blkptr_t blk; | |
1930 | ||
1931 | ASSERT(zio->io_vsd == NULL); | |
1932 | zio->io_vsd = dde; | |
1933 | ||
1934 | if (ddp_self == NULL) | |
1935 | return (ZIO_PIPELINE_CONTINUE); | |
1936 | ||
d6320ddb | 1937 | for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { |
428870ff BB |
1938 | if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) |
1939 | continue; | |
1940 | ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, | |
1941 | &blk); | |
1942 | zio_nowait(zio_read(zio, zio->io_spa, &blk, | |
1943 | zio_buf_alloc(zio->io_size), zio->io_size, | |
1944 | zio_ddt_child_read_done, dde, zio->io_priority, | |
1945 | ZIO_DDT_CHILD_FLAGS(zio) | ZIO_FLAG_DONT_PROPAGATE, | |
1946 | &zio->io_bookmark)); | |
1947 | } | |
1948 | return (ZIO_PIPELINE_CONTINUE); | |
1949 | } | |
1950 | ||
1951 | zio_nowait(zio_read(zio, zio->io_spa, bp, | |
1952 | zio->io_data, zio->io_size, NULL, NULL, zio->io_priority, | |
1953 | ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark)); | |
1954 | ||
1955 | return (ZIO_PIPELINE_CONTINUE); | |
1956 | } | |
1957 | ||
1958 | static int | |
1959 | zio_ddt_read_done(zio_t *zio) | |
1960 | { | |
1961 | blkptr_t *bp = zio->io_bp; | |
1962 | ||
1963 | if (zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE)) | |
1964 | return (ZIO_PIPELINE_STOP); | |
1965 | ||
1966 | ASSERT(BP_GET_DEDUP(bp)); | |
1967 | ASSERT(BP_GET_PSIZE(bp) == zio->io_size); | |
1968 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
1969 | ||
1970 | if (zio->io_child_error[ZIO_CHILD_DDT]) { | |
1971 | ddt_t *ddt = ddt_select(zio->io_spa, bp); | |
1972 | ddt_entry_t *dde = zio->io_vsd; | |
1973 | if (ddt == NULL) { | |
1974 | ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE); | |
1975 | return (ZIO_PIPELINE_CONTINUE); | |
1976 | } | |
1977 | if (dde == NULL) { | |
1978 | zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1; | |
1979 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE); | |
1980 | return (ZIO_PIPELINE_STOP); | |
1981 | } | |
1982 | if (dde->dde_repair_data != NULL) { | |
1983 | bcopy(dde->dde_repair_data, zio->io_data, zio->io_size); | |
1984 | zio->io_child_error[ZIO_CHILD_DDT] = 0; | |
1985 | } | |
1986 | ddt_repair_done(ddt, dde); | |
1987 | zio->io_vsd = NULL; | |
1988 | } | |
1989 | ||
1990 | ASSERT(zio->io_vsd == NULL); | |
1991 | ||
1992 | return (ZIO_PIPELINE_CONTINUE); | |
1993 | } | |
1994 | ||
1995 | static boolean_t | |
1996 | zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) | |
1997 | { | |
1998 | spa_t *spa = zio->io_spa; | |
d6320ddb | 1999 | int p; |
428870ff BB |
2000 | |
2001 | /* | |
2002 | * Note: we compare the original data, not the transformed data, | |
2003 | * because when zio->io_bp is an override bp, we will not have | |
2004 | * pushed the I/O transforms. That's an important optimization | |
2005 | * because otherwise we'd compress/encrypt all dmu_sync() data twice. | |
2006 | */ | |
d6320ddb | 2007 | for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { |
428870ff BB |
2008 | zio_t *lio = dde->dde_lead_zio[p]; |
2009 | ||
2010 | if (lio != NULL) { | |
2011 | return (lio->io_orig_size != zio->io_orig_size || | |
2012 | bcmp(zio->io_orig_data, lio->io_orig_data, | |
2013 | zio->io_orig_size) != 0); | |
2014 | } | |
2015 | } | |
2016 | ||
d6320ddb | 2017 | for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { |
428870ff BB |
2018 | ddt_phys_t *ddp = &dde->dde_phys[p]; |
2019 | ||
2020 | if (ddp->ddp_phys_birth != 0) { | |
2021 | arc_buf_t *abuf = NULL; | |
2022 | uint32_t aflags = ARC_WAIT; | |
2023 | blkptr_t blk = *zio->io_bp; | |
2024 | int error; | |
2025 | ||
2026 | ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth); | |
2027 | ||
2028 | ddt_exit(ddt); | |
2029 | ||
2030 | error = arc_read_nolock(NULL, spa, &blk, | |
2031 | arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ, | |
2032 | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, | |
2033 | &aflags, &zio->io_bookmark); | |
2034 | ||
2035 | if (error == 0) { | |
2036 | if (arc_buf_size(abuf) != zio->io_orig_size || | |
2037 | bcmp(abuf->b_data, zio->io_orig_data, | |
2038 | zio->io_orig_size) != 0) | |
2039 | error = EEXIST; | |
2040 | VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1); | |
2041 | } | |
2042 | ||
2043 | ddt_enter(ddt); | |
2044 | return (error != 0); | |
2045 | } | |
2046 | } | |
2047 | ||
2048 | return (B_FALSE); | |
2049 | } | |
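A minimal sketch of the verify idea above (the types and names below are local to the example, not the DDT API): a dedup match is only trusted when the new, untransformed data byte-compares equal to the stored copy; any size or content mismatch is reported as a collision, which zio_ddt_write() then resolves by upgrading the checksum or falling back to an ordinary write.

#include <string.h>
#include <stddef.h>

/* Returns nonzero if the candidate duplicate does NOT match byte-for-byte. */
static int
dedup_collision(const void *new_data, size_t new_size,
    const void *stored_data, size_t stored_size)
{
	if (new_size != stored_size)
		return (1);
	return (memcmp(new_data, stored_data, new_size) != 0);
}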
2050 | ||
2051 | static void | |
2052 | zio_ddt_child_write_ready(zio_t *zio) | |
2053 | { | |
2054 | int p = zio->io_prop.zp_copies; | |
2055 | ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); | |
2056 | ddt_entry_t *dde = zio->io_private; | |
2057 | ddt_phys_t *ddp = &dde->dde_phys[p]; | |
2058 | zio_t *pio; | |
2059 | ||
2060 | if (zio->io_error) | |
2061 | return; | |
2062 | ||
2063 | ddt_enter(ddt); | |
2064 | ||
2065 | ASSERT(dde->dde_lead_zio[p] == zio); | |
2066 | ||
2067 | ddt_phys_fill(ddp, zio->io_bp); | |
2068 | ||
2069 | while ((pio = zio_walk_parents(zio)) != NULL) | |
2070 | ddt_bp_fill(ddp, pio->io_bp, zio->io_txg); | |
2071 | ||
2072 | ddt_exit(ddt); | |
2073 | } | |
2074 | ||
2075 | static void | |
2076 | zio_ddt_child_write_done(zio_t *zio) | |
2077 | { | |
2078 | int p = zio->io_prop.zp_copies; | |
2079 | ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp); | |
2080 | ddt_entry_t *dde = zio->io_private; | |
2081 | ddt_phys_t *ddp = &dde->dde_phys[p]; | |
2082 | ||
2083 | ddt_enter(ddt); | |
2084 | ||
2085 | ASSERT(ddp->ddp_refcnt == 0); | |
2086 | ASSERT(dde->dde_lead_zio[p] == zio); | |
2087 | dde->dde_lead_zio[p] = NULL; | |
2088 | ||
2089 | if (zio->io_error == 0) { | |
2090 | while (zio_walk_parents(zio) != NULL) | |
2091 | ddt_phys_addref(ddp); | |
2092 | } else { | |
2093 | ddt_phys_clear(ddp); | |
2094 | } | |
2095 | ||
2096 | ddt_exit(ddt); | |
2097 | } | |
2098 | ||
2099 | static void | |
2100 | zio_ddt_ditto_write_done(zio_t *zio) | |
2101 | { | |
2102 | int p = DDT_PHYS_DITTO; | |
428870ff BB |
2103 | blkptr_t *bp = zio->io_bp; |
2104 | ddt_t *ddt = ddt_select(zio->io_spa, bp); | |
2105 | ddt_entry_t *dde = zio->io_private; | |
2106 | ddt_phys_t *ddp = &dde->dde_phys[p]; | |
2107 | ddt_key_t *ddk = &dde->dde_key; | |
1fde1e37 | 2108 | ASSERTV(zio_prop_t *zp = &zio->io_prop); |
428870ff BB |
2109 | |
2110 | ddt_enter(ddt); | |
2111 | ||
2112 | ASSERT(ddp->ddp_refcnt == 0); | |
2113 | ASSERT(dde->dde_lead_zio[p] == zio); | |
2114 | dde->dde_lead_zio[p] = NULL; | |
2115 | ||
2116 | if (zio->io_error == 0) { | |
2117 | ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum)); | |
2118 | ASSERT(zp->zp_copies < SPA_DVAS_PER_BP); | |
2119 | ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp)); | |
2120 | if (ddp->ddp_phys_birth != 0) | |
2121 | ddt_phys_free(ddt, ddk, ddp, zio->io_txg); | |
2122 | ddt_phys_fill(ddp, bp); | |
2123 | } | |
2124 | ||
2125 | ddt_exit(ddt); | |
2126 | } | |
2127 | ||
2128 | static int | |
2129 | zio_ddt_write(zio_t *zio) | |
2130 | { | |
2131 | spa_t *spa = zio->io_spa; | |
2132 | blkptr_t *bp = zio->io_bp; | |
2133 | uint64_t txg = zio->io_txg; | |
2134 | zio_prop_t *zp = &zio->io_prop; | |
2135 | int p = zp->zp_copies; | |
2136 | int ditto_copies; | |
2137 | zio_t *cio = NULL; | |
2138 | zio_t *dio = NULL; | |
2139 | ddt_t *ddt = ddt_select(spa, bp); | |
2140 | ddt_entry_t *dde; | |
2141 | ddt_phys_t *ddp; | |
2142 | ||
2143 | ASSERT(BP_GET_DEDUP(bp)); | |
2144 | ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum); | |
2145 | ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override); | |
2146 | ||
2147 | ddt_enter(ddt); | |
2148 | dde = ddt_lookup(ddt, bp, B_TRUE); | |
2149 | ddp = &dde->dde_phys[p]; | |
2150 | ||
2151 | if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) { | |
2152 | /* | |
2153 | * If we're using a weak checksum, upgrade to a strong checksum | |
2154 | * and try again. If we're already using a strong checksum, | |
2155 | * we can't resolve it, so just convert to an ordinary write. | |
2156 | * (And automatically e-mail a paper to Nature?) | |
2157 | */ | |
2158 | if (!zio_checksum_table[zp->zp_checksum].ci_dedup) { | |
2159 | zp->zp_checksum = spa_dedup_checksum(spa); | |
2160 | zio_pop_transforms(zio); | |
2161 | zio->io_stage = ZIO_STAGE_OPEN; | |
2162 | BP_ZERO(bp); | |
2163 | } else { | |
2164 | zp->zp_dedup = 0; | |
2165 | } | |
2166 | zio->io_pipeline = ZIO_WRITE_PIPELINE; | |
2167 | ddt_exit(ddt); | |
2168 | return (ZIO_PIPELINE_CONTINUE); | |
2169 | } | |
2170 | ||
2171 | ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp); | |
2172 | ASSERT(ditto_copies < SPA_DVAS_PER_BP); | |
2173 | ||
2174 | if (ditto_copies > ddt_ditto_copies_present(dde) && | |
2175 | dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) { | |
2176 | zio_prop_t czp = *zp; | |
2177 | ||
2178 | czp.zp_copies = ditto_copies; | |
2179 | ||
2180 | /* | |
2181 | * If we arrived here with an override bp, we won't have run | |
2182 | * the transform stack, so we won't have the data we need to | |
2183 | * generate a child i/o. So, toss the override bp and restart. | |
2184 | * This is safe, because using the override bp is just an | |
2185 | * optimization; and it's rare, so the cost doesn't matter. | |
2186 | */ | |
2187 | if (zio->io_bp_override) { | |
2188 | zio_pop_transforms(zio); | |
2189 | zio->io_stage = ZIO_STAGE_OPEN; | |
2190 | zio->io_pipeline = ZIO_WRITE_PIPELINE; | |
2191 | zio->io_bp_override = NULL; | |
2192 | BP_ZERO(bp); | |
2193 | ddt_exit(ddt); | |
2194 | return (ZIO_PIPELINE_CONTINUE); | |
2195 | } | |
2196 | ||
2197 | dio = zio_write(zio, spa, txg, bp, zio->io_orig_data, | |
2198 | zio->io_orig_size, &czp, NULL, | |
2199 | zio_ddt_ditto_write_done, dde, zio->io_priority, | |
2200 | ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); | |
2201 | ||
2202 | zio_push_transform(dio, zio->io_data, zio->io_size, 0, NULL); | |
2203 | dde->dde_lead_zio[DDT_PHYS_DITTO] = dio; | |
2204 | } | |
2205 | ||
2206 | if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) { | |
2207 | if (ddp->ddp_phys_birth != 0) | |
2208 | ddt_bp_fill(ddp, bp, txg); | |
2209 | if (dde->dde_lead_zio[p] != NULL) | |
2210 | zio_add_child(zio, dde->dde_lead_zio[p]); | |
2211 | else | |
2212 | ddt_phys_addref(ddp); | |
2213 | } else if (zio->io_bp_override) { | |
2214 | ASSERT(bp->blk_birth == txg); | |
2215 | ASSERT(BP_EQUAL(bp, zio->io_bp_override)); | |
2216 | ddt_phys_fill(ddp, bp); | |
2217 | ddt_phys_addref(ddp); | |
2218 | } else { | |
2219 | cio = zio_write(zio, spa, txg, bp, zio->io_orig_data, | |
2220 | zio->io_orig_size, zp, zio_ddt_child_write_ready, | |
2221 | zio_ddt_child_write_done, dde, zio->io_priority, | |
2222 | ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark); | |
2223 | ||
2224 | zio_push_transform(cio, zio->io_data, zio->io_size, 0, NULL); | |
2225 | dde->dde_lead_zio[p] = cio; | |
2226 | } | |
2227 | ||
2228 | ddt_exit(ddt); | |
2229 | ||
2230 | if (cio) | |
2231 | zio_nowait(cio); | |
2232 | if (dio) | |
2233 | zio_nowait(dio); | |
2234 | ||
2235 | return (ZIO_PIPELINE_CONTINUE); | |
2236 | } | |
2237 | ||
2238 | ddt_entry_t *freedde; /* for debugging */ | |
b128c09f | 2239 | |
428870ff BB |
2240 | static int |
2241 | zio_ddt_free(zio_t *zio) | |
2242 | { | |
2243 | spa_t *spa = zio->io_spa; | |
2244 | blkptr_t *bp = zio->io_bp; | |
2245 | ddt_t *ddt = ddt_select(spa, bp); | |
2246 | ddt_entry_t *dde; | |
2247 | ddt_phys_t *ddp; | |
2248 | ||
2249 | ASSERT(BP_GET_DEDUP(bp)); | |
2250 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
2251 | ||
2252 | ddt_enter(ddt); | |
2253 | freedde = dde = ddt_lookup(ddt, bp, B_TRUE); | |
5dc6af0e BB |
2254 | if (dde) { |
2255 | ddp = ddt_phys_select(dde, bp); | |
2256 | if (ddp) | |
2257 | ddt_phys_decref(ddp); | |
2258 | } | |
428870ff BB |
2259 | ddt_exit(ddt); |
2260 | ||
2261 | return (ZIO_PIPELINE_CONTINUE); | |
2262 | } | |
2263 | ||
2264 | /* | |
2265 | * ========================================================================== | |
2266 | * Allocate and free blocks | |
2267 | * ========================================================================== | |
2268 | */ | |
34dc7c2f BB |
2269 | static int |
2270 | zio_dva_allocate(zio_t *zio) | |
2271 | { | |
2272 | spa_t *spa = zio->io_spa; | |
428870ff | 2273 | metaslab_class_t *mc = spa_normal_class(spa); |
34dc7c2f BB |
2274 | blkptr_t *bp = zio->io_bp; |
2275 | int error; | |
6d974228 | 2276 | int flags = 0; |
34dc7c2f | 2277 | |
9babb374 BB |
2278 | if (zio->io_gang_leader == NULL) { |
2279 | ASSERT(zio->io_child_type > ZIO_CHILD_GANG); | |
2280 | zio->io_gang_leader = zio; | |
2281 | } | |
2282 | ||
34dc7c2f BB |
2283 | ASSERT(BP_IS_HOLE(bp)); |
2284 | ASSERT3U(BP_GET_NDVAS(bp), ==, 0); | |
428870ff BB |
2285 | ASSERT3U(zio->io_prop.zp_copies, >, 0); |
2286 | ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa)); | |
34dc7c2f BB |
2287 | ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); |
2288 | ||
6d974228 GW |
2289 | /* |
2290 | * The dump device does not support gang blocks so allocation on | |
2291 | * behalf of the dump device (i.e. ZIO_FLAG_NODATA) must avoid | |
2292 | * the "fast" gang feature. | |
2293 | */ | |
2294 | flags |= (zio->io_flags & ZIO_FLAG_NODATA) ? METASLAB_GANG_AVOID : 0; | |
2295 | flags |= (zio->io_flags & ZIO_FLAG_GANG_CHILD) ? | |
2296 | METASLAB_GANG_CHILD : 0; | |
920dd524 | 2297 | flags |= (zio->io_flags & ZIO_FLAG_FASTWRITE) ? METASLAB_FASTWRITE : 0; |
b128c09f | 2298 | error = metaslab_alloc(spa, mc, zio->io_size, bp, |
6d974228 | 2299 | zio->io_prop.zp_copies, zio->io_txg, NULL, flags); |
34dc7c2f | 2300 | |
b128c09f | 2301 | if (error) { |
6d974228 GW |
2302 | spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, " |
2303 | "size %llu, error %d", spa_name(spa), zio, zio->io_size, | |
2304 | error); | |
b128c09f BB |
2305 | if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) |
2306 | return (zio_write_gang_block(zio)); | |
34dc7c2f BB |
2307 | zio->io_error = error; |
2308 | } | |
2309 | ||
2310 | return (ZIO_PIPELINE_CONTINUE); | |
2311 | } | |
2312 | ||
2313 | static int | |
2314 | zio_dva_free(zio_t *zio) | |
2315 | { | |
b128c09f | 2316 | metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE); |
34dc7c2f BB |
2317 | |
2318 | return (ZIO_PIPELINE_CONTINUE); | |
2319 | } | |
2320 | ||
2321 | static int | |
2322 | zio_dva_claim(zio_t *zio) | |
2323 | { | |
b128c09f BB |
2324 | int error; |
2325 | ||
2326 | error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg); | |
2327 | if (error) | |
2328 | zio->io_error = error; | |
34dc7c2f BB |
2329 | |
2330 | return (ZIO_PIPELINE_CONTINUE); | |
2331 | } | |
2332 | ||
b128c09f BB |
2333 | /* |
2334 | * Undo an allocation. This is used by zio_done() when an I/O fails | |
2335 | * and we want to give back the block we just allocated. | |
2336 | * This handles both normal blocks and gang blocks. | |
2337 | */ | |
2338 | static void | |
2339 | zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) | |
2340 | { | |
d6320ddb BB |
2341 | int g; |
2342 | ||
b128c09f | 2343 | ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); |
428870ff | 2344 | ASSERT(zio->io_bp_override == NULL); |
b128c09f BB |
2345 | |
2346 | if (!BP_IS_HOLE(bp)) | |
428870ff | 2347 | metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); |
b128c09f BB |
2348 | |
2349 | if (gn != NULL) { | |
d6320ddb | 2350 | for (g = 0; g < SPA_GBH_NBLKPTRS; g++) { |
b128c09f BB |
2351 | zio_dva_unallocate(zio, gn->gn_child[g], |
2352 | &gn->gn_gbh->zg_blkptr[g]); | |
2353 | } | |
2354 | } | |
2355 | } | |
2356 | ||
2357 | /* | |
2358 | * Try to allocate an intent log block. Return 0 on success, errno on failure. | |
2359 | */ | |
2360 | int | |
920dd524 ED |
2361 | zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, uint64_t size, |
2362 | boolean_t use_slog) | |
b128c09f | 2363 | { |
428870ff | 2364 | int error = 1; |
b128c09f | 2365 | |
428870ff BB |
2366 | ASSERT(txg > spa_syncing_txg(spa)); |
2367 | ||
ebf8e3a2 BB |
2368 | /* |
2369 | * ZIL blocks are always contiguous (i.e. not gang blocks) so we | |
2370 | * set the METASLAB_GANG_AVOID flag so that they don't "fast gang" | |
2371 | * when allocating them. | |
2372 | */ | |
2373 | if (use_slog) { | |
428870ff | 2374 | error = metaslab_alloc(spa, spa_log_class(spa), size, |
920dd524 ED |
2375 | new_bp, 1, txg, NULL, |
2376 | METASLAB_FASTWRITE | METASLAB_GANG_AVOID); | |
ebf8e3a2 | 2377 | } |
b128c09f | 2378 | |
ebf8e3a2 | 2379 | if (error) { |
428870ff | 2380 | error = metaslab_alloc(spa, spa_normal_class(spa), size, |
920dd524 ED |
2381 | new_bp, 1, txg, NULL, |
2382 | METASLAB_FASTWRITE | METASLAB_GANG_AVOID); | |
ebf8e3a2 | 2383 | } |
b128c09f BB |
2384 | |
2385 | if (error == 0) { | |
2386 | BP_SET_LSIZE(new_bp, size); | |
2387 | BP_SET_PSIZE(new_bp, size); | |
2388 | BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); | |
428870ff BB |
2389 | BP_SET_CHECKSUM(new_bp, |
2390 | spa_version(spa) >= SPA_VERSION_SLIM_ZIL | |
2391 | ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG); | |
b128c09f BB |
2392 | BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); |
2393 | BP_SET_LEVEL(new_bp, 0); | |
428870ff | 2394 | BP_SET_DEDUP(new_bp, 0); |
b128c09f BB |
2395 | BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); |
2396 | } | |
2397 | ||
2398 | return (error); | |
2399 | } | |
2400 | ||
2401 | /* | |
428870ff | 2402 | * Free an intent log block. |
b128c09f BB |
2403 | */ |
2404 | void | |
428870ff | 2405 | zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp) |
b128c09f | 2406 | { |
428870ff | 2407 | ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG); |
b128c09f BB |
2408 | ASSERT(!BP_IS_GANG(bp)); |
2409 | ||
428870ff | 2410 | zio_free(spa, txg, bp); |
b128c09f BB |
2411 | } |
2412 | ||
34dc7c2f BB |
2413 | /* |
2414 | * ========================================================================== | |
2415 | * Read and write to physical devices | |
2416 | * ========================================================================== | |
2417 | */ | |
34dc7c2f BB |
2418 | static int |
2419 | zio_vdev_io_start(zio_t *zio) | |
2420 | { | |
2421 | vdev_t *vd = zio->io_vd; | |
34dc7c2f BB |
2422 | uint64_t align; |
2423 | spa_t *spa = zio->io_spa; | |
2424 | ||
b128c09f BB |
2425 | ASSERT(zio->io_error == 0); |
2426 | ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); | |
34dc7c2f | 2427 | |
b128c09f BB |
2428 | if (vd == NULL) { |
2429 | if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) | |
2430 | spa_config_enter(spa, SCL_ZIO, zio, RW_READER); | |
34dc7c2f | 2431 | |
b128c09f BB |
2432 | /* |
2433 | * The mirror_ops handle multiple DVAs in a single BP. | |
2434 | */ | |
2435 | return (vdev_mirror_ops.vdev_op_io_start(zio)); | |
34dc7c2f BB |
2436 | } |
2437 | ||
572e2857 BB |
2438 | /* |
2439 | * We keep track of time-sensitive I/Os so that the scan thread | |
2440 | * can quickly react to certain workloads. In particular, we care | |
2441 | * about non-scrubbing, top-level reads and writes with the following | |
2442 | * characteristics: | |
2443 | * - synchronous writes of user data to non-slog devices | |
2444 | * - any reads of user data | |
2445 | * When these conditions are met, adjust the timestamp of spa_last_io | |
2446 | * which allows the scan thread to adjust its workload accordingly. | |
2447 | */ | |
2448 | if (!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && zio->io_bp != NULL && | |
2449 | vd == vd->vdev_top && !vd->vdev_islog && | |
2450 | zio->io_bookmark.zb_objset != DMU_META_OBJSET && | |
2451 | zio->io_txg != spa_syncing_txg(spa)) { | |
2452 | uint64_t old = spa->spa_last_io; | |
2453 | uint64_t new = ddi_get_lbolt64(); | |
2454 | if (old != new) | |
2455 | (void) atomic_cas_64(&spa->spa_last_io, old, new); | |
2456 | } | |
2457 | ||
b128c09f BB |
2458 | align = 1ULL << vd->vdev_top->vdev_ashift; |
2459 | ||
178e73b3 | 2460 | if (P2PHASE(zio->io_size, align) != 0) { |
34dc7c2f BB |
2461 | uint64_t asize = P2ROUNDUP(zio->io_size, align); |
2462 | char *abuf = zio_buf_alloc(asize); | |
178e73b3 | 2463 | ASSERT(vd == vd->vdev_top); |
34dc7c2f BB |
2464 | if (zio->io_type == ZIO_TYPE_WRITE) { |
2465 | bcopy(zio->io_data, abuf, zio->io_size); | |
2466 | bzero(abuf + zio->io_size, asize - zio->io_size); | |
2467 | } | |
b128c09f | 2468 | zio_push_transform(zio, abuf, asize, asize, zio_subblock); |
34dc7c2f BB |
2469 | } |
2470 | ||
2471 | ASSERT(P2PHASE(zio->io_offset, align) == 0); | |
178e73b3 | 2472 | ASSERT(P2PHASE(zio->io_size, align) == 0); |
572e2857 | 2473 | VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa)); |
fb5f0bc8 BB |
2474 | |
2475 | /* | |
2476 | * If this is a repair I/O, and there's no self-healing involved -- | |
2477 | * that is, we're just resilvering what we expect to resilver -- | |
2478 | * then don't do the I/O unless zio's txg is actually in vd's DTL. | |
2479 | * This prevents spurious resilvering with nested replication. | |
2480 | * For example, given a mirror of mirrors, (A+B)+(C+D), if only | |
2481 | * A is out of date, we'll read from C+D, then use the data to | |
2482 | * resilver A+B -- but we don't actually want to resilver B, just A. | |
2483 | * The top-level mirror has no way to know this, so instead we just | |
2484 | * discard unnecessary repairs as we work our way down the vdev tree. | |
2485 | * The same logic applies to any form of nested replication: | |
2486 | * ditto + mirror, RAID-Z + replacing, etc. This covers them all. | |
2487 | */ | |
2488 | if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) && | |
2489 | !(zio->io_flags & ZIO_FLAG_SELF_HEAL) && | |
2490 | zio->io_txg != 0 && /* not a delegated i/o */ | |
2491 | !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) { | |
2492 | ASSERT(zio->io_type == ZIO_TYPE_WRITE); | |
fb5f0bc8 BB |
2493 | zio_vdev_io_bypass(zio); |
2494 | return (ZIO_PIPELINE_CONTINUE); | |
2495 | } | |
34dc7c2f | 2496 | |
b128c09f BB |
2497 | if (vd->vdev_ops->vdev_op_leaf && |
2498 | (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) { | |
2499 | ||
2500 | if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0) | |
d164b209 | 2501 | return (ZIO_PIPELINE_CONTINUE); |
b128c09f BB |
2502 | |
2503 | if ((zio = vdev_queue_io(zio)) == NULL) | |
2504 | return (ZIO_PIPELINE_STOP); | |
2505 | ||
2506 | if (!vdev_accessible(vd, zio)) { | |
2507 | zio->io_error = ENXIO; | |
2508 | zio_interrupt(zio); | |
2509 | return (ZIO_PIPELINE_STOP); | |
2510 | } | |
b128c09f BB |
2511 | } |
2512 | ||
34dc7c2f BB |
2513 | return (vd->vdev_ops->vdev_op_io_start(zio)); |
2514 | } | |
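A hedged arithmetic check of the sub-ashift padding performed above (the macros are local stand-ins for the kernel's P2PHASE/P2ROUNDUP, and the 2560-byte size is a made-up example): with vdev_ashift = 12 the alignment is 4096, so the I/O is detected as misaligned and staged through a 4096-byte buffer whose tail is zero-filled on writes.

#include <assert.h>
#include <stdint.h>

#define P2PHASE(x, a)	((x) & ((a) - 1))		/* offset within an a-sized boundary */
#define P2ROUNDUP(x, a)	((((x) - 1) | ((a) - 1)) + 1)	/* round x up to a multiple of a */

int
main(void)
{
	uint64_t ashift = 12;			/* vd->vdev_top->vdev_ashift */
	uint64_t align = 1ULL << ashift;	/* 4096-byte device block */
	uint64_t io_size = 2560;		/* hypothetical odd-sized I/O */

	assert(P2PHASE(io_size, align) != 0);		/* misaligned: pad transform needed */
	assert(P2ROUNDUP(io_size, align) == 4096);	/* size of the padded bounce buffer */
	return (0);
}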
2515 | ||
2516 | static int | |
2517 | zio_vdev_io_done(zio_t *zio) | |
2518 | { | |
b128c09f BB |
2519 | vdev_t *vd = zio->io_vd; |
2520 | vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops; | |
2521 | boolean_t unexpected_error = B_FALSE; | |
34dc7c2f | 2522 | |
b128c09f BB |
2523 | if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) |
2524 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f | 2525 | |
b128c09f BB |
2526 | ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE); |
2527 | ||
2528 | if (vd != NULL && vd->vdev_ops->vdev_op_leaf) { | |
2529 | ||
2530 | vdev_queue_io_done(zio); | |
2531 | ||
2532 | if (zio->io_type == ZIO_TYPE_WRITE) | |
2533 | vdev_cache_write(zio); | |
2534 | ||
2535 | if (zio_injection_enabled && zio->io_error == 0) | |
9babb374 BB |
2536 | zio->io_error = zio_handle_device_injection(vd, |
2537 | zio, EIO); | |
b128c09f BB |
2538 | |
2539 | if (zio_injection_enabled && zio->io_error == 0) | |
2540 | zio->io_error = zio_handle_label_injection(zio, EIO); | |
2541 | ||
2542 | if (zio->io_error) { | |
2543 | if (!vdev_accessible(vd, zio)) { | |
2544 | zio->io_error = ENXIO; | |
2545 | } else { | |
2546 | unexpected_error = B_TRUE; | |
2547 | } | |
2548 | } | |
2549 | } | |
2550 | ||
2551 | ops->vdev_op_io_done(zio); | |
34dc7c2f | 2552 | |
b128c09f | 2553 | if (unexpected_error) |
d164b209 | 2554 | VERIFY(vdev_probe(vd, zio) == NULL); |
34dc7c2f | 2555 | |
b128c09f | 2556 | return (ZIO_PIPELINE_CONTINUE); |
34dc7c2f BB |
2557 | } |
2558 | ||
428870ff BB |
2559 | /* |
2560 | * For non-raidz ZIOs, we can just copy aside the bad data read from the | |
2561 | * disk, and use that to finish the checksum ereport later. | |
2562 | */ | |
2563 | static void | |
2564 | zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr, | |
2565 | const void *good_buf) | |
2566 | { | |
2567 | /* no processing needed */ | |
2568 | zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE); | |
2569 | } | |
2570 | ||
2571 | /*ARGSUSED*/ | |
2572 | void | |
2573 | zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored) | |
2574 | { | |
2575 | void *buf = zio_buf_alloc(zio->io_size); | |
2576 | ||
2577 | bcopy(zio->io_data, buf, zio->io_size); | |
2578 | ||
2579 | zcr->zcr_cbinfo = zio->io_size; | |
2580 | zcr->zcr_cbdata = buf; | |
2581 | zcr->zcr_finish = zio_vsd_default_cksum_finish; | |
2582 | zcr->zcr_free = zio_buf_free; | |
2583 | } | |
2584 | ||
34dc7c2f BB |
2585 | static int |
2586 | zio_vdev_io_assess(zio_t *zio) | |
2587 | { | |
2588 | vdev_t *vd = zio->io_vd; | |
b128c09f BB |
2589 | |
2590 | if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE)) | |
2591 | return (ZIO_PIPELINE_STOP); | |
2592 | ||
2593 | if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) | |
2594 | spa_config_exit(zio->io_spa, SCL_ZIO, zio); | |
2595 | ||
2596 | if (zio->io_vsd != NULL) { | |
428870ff | 2597 | zio->io_vsd_ops->vsd_free(zio); |
b128c09f | 2598 | zio->io_vsd = NULL; |
34dc7c2f BB |
2599 | } |
2600 | ||
b128c09f | 2601 | if (zio_injection_enabled && zio->io_error == 0) |
34dc7c2f BB |
2602 | zio->io_error = zio_handle_fault_injection(zio, EIO); |
2603 | ||
2604 | /* | |
2605 | * If the I/O failed, determine whether we should attempt to retry it. | |
428870ff BB |
2606 | * |
2607 | * On retry, we cut in line in the issue queue, since we don't want | |
2608 | * compression/checksumming/etc. work to prevent our (cheap) IO reissue. | |
34dc7c2f | 2609 | */ |
b128c09f BB |
2610 | if (zio->io_error && vd == NULL && |
2611 | !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) { | |
2612 | ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */ | |
2613 | ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */ | |
34dc7c2f | 2614 | zio->io_error = 0; |
b128c09f BB |
2615 | zio->io_flags |= ZIO_FLAG_IO_RETRY | |
2616 | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE; | |
428870ff BB |
2617 | zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1; |
2618 | zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, | |
2619 | zio_requeue_io_start_cut_in_line); | |
b128c09f | 2620 | return (ZIO_PIPELINE_STOP); |
34dc7c2f BB |
2621 | } |
2622 | ||
b128c09f BB |
2623 | /* |
2624 | * If we got an error on a leaf device, convert it to ENXIO | |
2625 | * if the device is not accessible at all. | |
2626 | */ | |
2627 | if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf && | |
2628 | !vdev_accessible(vd, zio)) | |
2629 | zio->io_error = ENXIO; | |
2630 | ||
2631 | /* | |
2632 | * If we can't write to an interior vdev (mirror or RAID-Z), | |
2633 | * set vdev_cant_write so that we stop trying to allocate from it. | |
2634 | */ | |
2635 | if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE && | |
2636 | vd != NULL && !vd->vdev_ops->vdev_op_leaf) | |
2637 | vd->vdev_cant_write = B_TRUE; | |
2638 | ||
2639 | if (zio->io_error) | |
2640 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; | |
2641 | ||
34dc7c2f BB |
2642 | return (ZIO_PIPELINE_CONTINUE); |
2643 | } | |
2644 | ||
2645 | void | |
2646 | zio_vdev_io_reissue(zio_t *zio) | |
2647 | { | |
2648 | ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); | |
2649 | ASSERT(zio->io_error == 0); | |
2650 | ||
428870ff | 2651 | zio->io_stage >>= 1; |
34dc7c2f BB |
2652 | } |
2653 | ||
2654 | void | |
2655 | zio_vdev_io_redone(zio_t *zio) | |
2656 | { | |
2657 | ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); | |
2658 | ||
428870ff | 2659 | zio->io_stage >>= 1; |
34dc7c2f BB |
2660 | } |
2661 | ||
2662 | void | |
2663 | zio_vdev_io_bypass(zio_t *zio) | |
2664 | { | |
2665 | ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); | |
2666 | ASSERT(zio->io_error == 0); | |
2667 | ||
2668 | zio->io_flags |= ZIO_FLAG_IO_BYPASS; | |
428870ff | 2669 | zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1; |
34dc7c2f BB |
2670 | } |
2671 | ||
2672 | /* | |
2673 | * ========================================================================== | |
2674 | * Generate and verify checksums | |
2675 | * ========================================================================== | |
2676 | */ | |
2677 | static int | |
2678 | zio_checksum_generate(zio_t *zio) | |
2679 | { | |
34dc7c2f | 2680 | blkptr_t *bp = zio->io_bp; |
b128c09f | 2681 | enum zio_checksum checksum; |
34dc7c2f | 2682 | |
b128c09f BB |
2683 | if (bp == NULL) { |
2684 | /* | |
2685 | * This is zio_write_phys(). | |
2686 | * We're either generating a label checksum, or none at all. | |
2687 | */ | |
2688 | checksum = zio->io_prop.zp_checksum; | |
34dc7c2f | 2689 | |
b128c09f BB |
2690 | if (checksum == ZIO_CHECKSUM_OFF) |
2691 | return (ZIO_PIPELINE_CONTINUE); | |
2692 | ||
2693 | ASSERT(checksum == ZIO_CHECKSUM_LABEL); | |
2694 | } else { | |
2695 | if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) { | |
2696 | ASSERT(!IO_IS_ALLOCATING(zio)); | |
2697 | checksum = ZIO_CHECKSUM_GANG_HEADER; | |
2698 | } else { | |
2699 | checksum = BP_GET_CHECKSUM(bp); | |
2700 | } | |
2701 | } | |
34dc7c2f | 2702 | |
b128c09f | 2703 | zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size); |
34dc7c2f BB |
2704 | |
2705 | return (ZIO_PIPELINE_CONTINUE); | |
2706 | } | |
2707 | ||
2708 | static int | |
b128c09f | 2709 | zio_checksum_verify(zio_t *zio) |
34dc7c2f | 2710 | { |
428870ff | 2711 | zio_bad_cksum_t info; |
b128c09f BB |
2712 | blkptr_t *bp = zio->io_bp; |
2713 | int error; | |
34dc7c2f | 2714 | |
428870ff BB |
2715 | ASSERT(zio->io_vd != NULL); |
2716 | ||
b128c09f BB |
2717 | if (bp == NULL) { |
2718 | /* | |
2719 | * This is zio_read_phys(). | |
2720 | * We're either verifying a label checksum, or nothing at all. | |
2721 | */ | |
2722 | if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF) | |
2723 | return (ZIO_PIPELINE_CONTINUE); | |
34dc7c2f | 2724 | |
b128c09f BB |
2725 | ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL); |
2726 | } | |
34dc7c2f | 2727 | |
428870ff | 2728 | if ((error = zio_checksum_error(zio, &info)) != 0) { |
b128c09f BB |
2729 | zio->io_error = error; |
2730 | if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) { | |
428870ff BB |
2731 | zfs_ereport_start_checksum(zio->io_spa, |
2732 | zio->io_vd, zio, zio->io_offset, | |
2733 | zio->io_size, NULL, &info); | |
b128c09f | 2734 | } |
34dc7c2f BB |
2735 | } |
2736 | ||
2737 | return (ZIO_PIPELINE_CONTINUE); | |
2738 | } | |
2739 | ||
2740 | /* | |
2741 | * Called by RAID-Z to ensure we don't compute the checksum twice. | |
2742 | */ | |
2743 | void | |
2744 | zio_checksum_verified(zio_t *zio) | |
2745 | { | |
428870ff | 2746 | zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY; |
34dc7c2f BB |
2747 | } |
2748 | ||
2749 | /* | |
b128c09f BB |
2750 | * ========================================================================== |
2751 | * Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other. | 
2752 | * An error of 0 indicates success. ENXIO indicates whole-device failure, | 
2753 | * which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO | 
2754 | * indicate errors that are specific to one I/O, and most likely permanent. | |
2755 | * Any other error is presumed to be worse because we weren't expecting it. | |
2756 | * ========================================================================== | |
34dc7c2f | 2757 | */ |
b128c09f BB |
2758 | int |
2759 | zio_worst_error(int e1, int e2) | |
34dc7c2f | 2760 | { |
b128c09f BB |
2761 | static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO }; |
2762 | int r1, r2; | |
2763 | ||
2764 | for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++) | |
2765 | if (e1 == zio_error_rank[r1]) | |
2766 | break; | |
34dc7c2f | 2767 | |
b128c09f BB |
2768 | for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++) |
2769 | if (e2 == zio_error_rank[r2]) | |
2770 | break; | |
2771 | ||
2772 | return (r1 > r2 ? e1 : e2); | |
34dc7c2f BB |
2773 | } |
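A small usage sketch (the caller below is hypothetical, not part of zio.c): when several child results must collapse into one parent error, folding them through zio_worst_error() keeps the most severe one, with unexpected errnos outranking EIO, ECKSUM, and ENXIO in that order.

/* Hypothetical helper: fold per-child errors into a single worst error. */
static int
fold_child_errors(const int *child_error, int nchildren)
{
	int error = 0;
	int c;

	for (c = 0; c < nchildren; c++)
		error = zio_worst_error(error, child_error[c]);

	return (error);	/* e.g. {0, ENXIO, EIO} folds to EIO */
}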
2774 | ||
2775 | /* | |
2776 | * ========================================================================== | |
b128c09f | 2777 | * I/O completion |
34dc7c2f BB |
2778 | * ========================================================================== |
2779 | */ | |
b128c09f BB |
2780 | static int |
2781 | zio_ready(zio_t *zio) | |
34dc7c2f | 2782 | { |
b128c09f | 2783 | blkptr_t *bp = zio->io_bp; |
d164b209 | 2784 | zio_t *pio, *pio_next; |
34dc7c2f | 2785 | |
428870ff BB |
2786 | if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY) || |
2787 | zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_READY)) | |
9babb374 | 2788 | return (ZIO_PIPELINE_STOP); |
34dc7c2f | 2789 | |
9babb374 | 2790 | if (zio->io_ready) { |
b128c09f BB |
2791 | ASSERT(IO_IS_ALLOCATING(zio)); |
2792 | ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); | |
2793 | ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0); | |
34dc7c2f | 2794 | |
b128c09f BB |
2795 | zio->io_ready(zio); |
2796 | } | |
34dc7c2f | 2797 | |
b128c09f BB |
2798 | if (bp != NULL && bp != &zio->io_bp_copy) |
2799 | zio->io_bp_copy = *bp; | |
34dc7c2f | 2800 | |
b128c09f BB |
2801 | if (zio->io_error) |
2802 | zio->io_pipeline = ZIO_INTERLOCK_PIPELINE; | |
34dc7c2f | 2803 | |
d164b209 BB |
2804 | mutex_enter(&zio->io_lock); |
2805 | zio->io_state[ZIO_WAIT_READY] = 1; | |
2806 | pio = zio_walk_parents(zio); | |
2807 | mutex_exit(&zio->io_lock); | |
2808 | ||
2809 | /* | |
2810 | * As we notify zio's parents, new parents could be added. | |
2811 | * New parents go to the head of zio's io_parent_list, however, | |
2812 | * so we will (correctly) not notify them. The remainder of zio's | |
2813 | * io_parent_list, from 'pio_next' onward, cannot change because | |
2814 | * all parents must wait for us to be done before they can be done. | |
2815 | */ | |
2816 | for (; pio != NULL; pio = pio_next) { | |
2817 | pio_next = zio_walk_parents(zio); | |
b128c09f | 2818 | zio_notify_parent(pio, zio, ZIO_WAIT_READY); |
d164b209 | 2819 | } |
34dc7c2f | 2820 | |
428870ff BB |
2821 | if (zio->io_flags & ZIO_FLAG_NODATA) { |
2822 | if (BP_IS_GANG(bp)) { | |
2823 | zio->io_flags &= ~ZIO_FLAG_NODATA; | |
2824 | } else { | |
2825 | ASSERT((uintptr_t)zio->io_data < SPA_MAXBLOCKSIZE); | |
2826 | zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES; | |
2827 | } | |
2828 | } | |
2829 | ||
2830 | if (zio_injection_enabled && | |
2831 | zio->io_spa->spa_syncing_txg == zio->io_txg) | |
2832 | zio_handle_ignored_writes(zio); | |
2833 | ||
b128c09f | 2834 | return (ZIO_PIPELINE_CONTINUE); |
34dc7c2f BB |
2835 | } |
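The notify loop in zio_ready() above depends on new parents being added at the head of io_parent_list (per the comment above): the current head is snapshotted under io_lock, so parents that arrive afterwards are (correctly) skipped. A minimal sketch of that snapshot-then-walk pattern, using hypothetical names (my_node_t, n_notify) rather than the real zio structures:

/* Hypothetical illustration; not part of zio.c. */
typedef struct my_node my_node_t;
struct my_node {
	my_node_t	*n_next;	/* parent list, newest entries first */
	void		(*n_notify)(my_node_t *);
};

static void
my_notify_existing_parents(my_node_t *snapshot_head)
{
	my_node_t *p, *p_next;

	/*
	 * 'snapshot_head' was captured under the lock; parents added
	 * later sit in front of it and are deliberately not notified.
	 */
	for (p = snapshot_head; p != NULL; p = p_next) {
		p_next = p->n_next;	/* fetch next before the callback */
		p->n_notify(p);
	}
}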
2836 | ||
b128c09f BB |
2837 | static int |
2838 | zio_done(zio_t *zio) | |
34dc7c2f | 2839 | { |
d164b209 | 2840 | zio_t *pio, *pio_next; |
d6320ddb | 2841 | int c, w; |
34dc7c2f | 2842 | |
b128c09f | 2843 | /* |
9babb374 | 2844 | * If our children haven't all completed, |
b128c09f BB |
2845 | * wait for them and then repeat this pipeline stage. |
2846 | */ | |
2847 | if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) || | |
2848 | zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) || | |
428870ff | 2849 | zio_wait_for_children(zio, ZIO_CHILD_DDT, ZIO_WAIT_DONE) || |
b128c09f BB |
2850 | zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE)) |
2851 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f | 2852 | |
d6320ddb BB |
2853 | for (c = 0; c < ZIO_CHILD_TYPES; c++) |
2854 | for (w = 0; w < ZIO_WAIT_TYPES; w++) | |
b128c09f BB |
2855 | ASSERT(zio->io_children[c][w] == 0); |
2856 | ||
c776b317 BB |
2857 | if (zio->io_bp != NULL) { |
2858 | ASSERT(zio->io_bp->blk_pad[0] == 0); | |
2859 | ASSERT(zio->io_bp->blk_pad[1] == 0); | |
2860 | ASSERT(bcmp(zio->io_bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || | |
2861 | (zio->io_bp == zio_unique_parent(zio)->io_bp)); | |
2862 | if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) && | |
428870ff | 2863 | zio->io_bp_override == NULL && |
b128c09f | 2864 | !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { |
c776b317 BB |
2865 | ASSERT(!BP_SHOULD_BYTESWAP(zio->io_bp)); |
2866 | ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); | |
2867 | ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 || | |
2868 | (BP_COUNT_GANG(zio->io_bp) == BP_GET_NDVAS(zio->io_bp))); | |
b128c09f BB |
2869 | } |
2870 | } | |
2871 | ||
2872 | /* | |
428870ff | 2873 | * If there were child vdev/gang/ddt errors, they apply to us now. |
b128c09f BB |
2874 | */ |
2875 | zio_inherit_child_errors(zio, ZIO_CHILD_VDEV); | |
2876 | zio_inherit_child_errors(zio, ZIO_CHILD_GANG); | |
428870ff BB |
2877 | zio_inherit_child_errors(zio, ZIO_CHILD_DDT); |
2878 | ||
2879 | /* | |
2880 | * If the I/O on the transformed data was successful, generate any | |
2881 | * checksum reports now while we still have the transformed data. | |
2882 | */ | |
2883 | if (zio->io_error == 0) { | |
2884 | while (zio->io_cksum_report != NULL) { | |
2885 | zio_cksum_report_t *zcr = zio->io_cksum_report; | |
2886 | uint64_t align = zcr->zcr_align; | |
c776b317 | 2887 | uint64_t asize = P2ROUNDUP(zio->io_size, align); |
428870ff BB |
2888 | char *abuf = zio->io_data; |
2889 | ||
c776b317 | 2890 | if (asize != zio->io_size) { |
428870ff | 2891 | abuf = zio_buf_alloc(asize); |
c776b317 BB |
2892 | bcopy(zio->io_data, abuf, zio->io_size); |
2893 | bzero(abuf + zio->io_size, asize - zio->io_size); | |
428870ff BB |
2894 | } |
2895 | ||
2896 | zio->io_cksum_report = zcr->zcr_next; | |
2897 | zcr->zcr_next = NULL; | |
2898 | zcr->zcr_finish(zcr, abuf); | |
2899 | zfs_ereport_free_checksum(zcr); | |
2900 | ||
c776b317 | 2901 | if (asize != zio->io_size) |
428870ff BB |
2902 | zio_buf_free(abuf, asize); |
2903 | } | |
2904 | } | |
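	/*
	 * Worked example of the alignment above (illustrative values, not
	 * taken from the original code): with io_size = 5000 bytes and
	 * zcr_align = 512, asize = P2ROUNDUP(5000, 512) = 5120.  A 5120-byte
	 * buffer is allocated, the first 5000 bytes are copied from io_data,
	 * the trailing 120 bytes are zeroed, and zcr_finish() is handed the
	 * padded buffer, which is freed once the report is finished.
	 */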
b128c09f BB |
2905 | |
2906 | zio_pop_transforms(zio); /* note: may set zio->io_error */ | |
2907 | ||
c776b317 | 2908 | vdev_stat_update(zio, zio->io_size); |
b128c09f | 2909 | |
a69052be | 2910 | /* |
cc92e9d0 | 2911 | * If this I/O is attached to a particular vdev and is slow, taking more |
72f53c56 MJ |
2912 | * than 30 seconds (zio_delay_max) to complete, post an error describing | |
2913 | * the I/O delay. We ignore these errors if the device is currently unavailable. | |
a69052be | 2914 | */ |
cc92e9d0 | 2915 | if (zio->io_delay >= MSEC_TO_TICK(zio_delay_max)) { |
72f53c56 MJ |
2916 | if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) |
2917 | zfs_ereport_post(FM_EREPORT_ZFS_DELAY, zio->io_spa, | |
2918 | zio->io_vd, zio, 0, 0); | |
2919 | } | |
a69052be | 2920 | |
b128c09f BB |
2921 | if (zio->io_error) { |
2922 | /* | |
2923 | * If this I/O is attached to a particular vdev, | |
2924 | * generate an error message describing the I/O failure | |
2925 | * at the block level. We ignore these errors if the | |
2926 | * device is currently unavailable. | |
2927 | */ | |
c776b317 BB |
2928 | if (zio->io_error != ECKSUM && zio->io_vd != NULL && |
2929 | !vdev_is_dead(zio->io_vd)) | |
2930 | zfs_ereport_post(FM_EREPORT_ZFS_IO, zio->io_spa, | |
2931 | zio->io_vd, zio, 0, 0); | |
34dc7c2f | 2932 | |
428870ff BB |
2933 | if ((zio->io_error == EIO || !(zio->io_flags & |
2934 | (ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) && | |
c776b317 | 2935 | zio == zio->io_logical) { |
b128c09f BB |
2936 | /* |
2937 | * For logical I/O requests, tell the SPA to log the | |
2938 | * error and generate a logical data ereport. | |
2939 | */ | |
c776b317 BB |
2940 | spa_log_error(zio->io_spa, zio); |
2941 | zfs_ereport_post(FM_EREPORT_ZFS_DATA, zio->io_spa, NULL, zio, | |
b128c09f BB |
2942 | 0, 0); |
2943 | } | |
2944 | } | |
34dc7c2f | 2945 | |
c776b317 | 2946 | if (zio->io_error && zio == zio->io_logical) { |
b128c09f BB |
2947 | /* |
2948 | * Determine whether zio should be reexecuted. This will | |
2949 | * propagate all the way to the root via zio_notify_parent(). | |
2950 | */ | |
c776b317 | 2951 | ASSERT(zio->io_vd == NULL && zio->io_bp != NULL); |
428870ff | 2952 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); |
b128c09f | 2953 | |
428870ff BB |
2954 | if (IO_IS_ALLOCATING(zio) && |
2955 | !(zio->io_flags & ZIO_FLAG_CANFAIL)) { | |
b128c09f BB |
2956 | if (zio->io_error != ENOSPC) |
2957 | zio->io_reexecute |= ZIO_REEXECUTE_NOW; | |
2958 | else | |
2959 | zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; | |
428870ff | 2960 | } |
b128c09f BB |
2961 | |
2962 | if ((zio->io_type == ZIO_TYPE_READ || | |
2963 | zio->io_type == ZIO_TYPE_FREE) && | |
572e2857 | 2964 | !(zio->io_flags & ZIO_FLAG_SCAN_THREAD) && |
b128c09f | 2965 | zio->io_error == ENXIO && |
c776b317 BB |
2966 | spa_load_state(zio->io_spa) == SPA_LOAD_NONE && |
2967 | spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE) | |
b128c09f BB |
2968 | zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; |
2969 | ||
2970 | if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute) | |
2971 | zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND; | |
428870ff BB |
2972 | |
2973 | /* | |
2974 | * Here is a possibly good place to attempt to do | |
2975 | * either combinatorial reconstruction or error correction | |
2976 | * based on checksums. It also might be a good place | |
2977 | * to send out preliminary ereports before we suspend | |
2978 | * processing. | |
2979 | */ | |
34dc7c2f BB |
2980 | } |
2981 | ||
2982 | /* | |
b128c09f BB |
2983 | * If there were logical child errors, they apply to us now. |
2984 | * We defer this until now to avoid conflating logical child | |
2985 | * errors with errors that happened to the zio itself when | |
2986 | * updating vdev stats and reporting FMA events above. | |
34dc7c2f | 2987 | */ |
b128c09f | 2988 | zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL); |
34dc7c2f | 2989 | |
428870ff BB |
2990 | if ((zio->io_error || zio->io_reexecute) && |
2991 | IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio && | |
2992 | !(zio->io_flags & ZIO_FLAG_IO_REWRITE)) | |
c776b317 | 2993 | zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp); |
9babb374 BB |
2994 | |
2995 | zio_gang_tree_free(&zio->io_gang_tree); | |
2996 | ||
2997 | /* | |
2998 | * Godfather I/Os should never suspend. | |
2999 | */ | |
3000 | if ((zio->io_flags & ZIO_FLAG_GODFATHER) && | |
3001 | (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) | |
3002 | zio->io_reexecute = 0; | |
3003 | ||
b128c09f BB |
3004 | if (zio->io_reexecute) { |
3005 | /* | |
3006 | * This is a logical I/O that wants to reexecute. | |
3007 | * | |
3008 | * Reexecute is top-down. When an i/o fails, if it's not | |
3009 | * the root, it simply notifies its parent and sticks around. | |
3010 | * The parent, seeing that it still has children in zio_done(), | |
3011 | * does the same. This percolates all the way up to the root. | |
3012 | * The root i/o will reexecute or suspend the entire tree. | |
3013 | * | |
3014 | * This approach ensures that zio_reexecute() honors | |
3015 | * all the original i/o dependency relationships, e.g. | |
3016 | * parents not executing until children are ready. | |
3017 | */ | |
3018 | ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL); | |
34dc7c2f | 3019 | |
9babb374 | 3020 | zio->io_gang_leader = NULL; |
b128c09f | 3021 | |
d164b209 BB |
3022 | mutex_enter(&zio->io_lock); |
3023 | zio->io_state[ZIO_WAIT_DONE] = 1; | |
3024 | mutex_exit(&zio->io_lock); | |
3025 | ||
9babb374 BB |
3026 | /* |
3027 | * "The Godfather" I/O monitors its children but is | |
3028 | * not a true parent to them. It tracks them through | |
3029 | * the pipeline but severs its ties whenever they get into | |
3030 | * trouble (e.g. suspended). This allows "The Godfather" | |
3031 | * I/O to return status without blocking. | |
3032 | */ | |
3033 | for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { | |
3034 | zio_link_t *zl = zio->io_walk_link; | |
3035 | pio_next = zio_walk_parents(zio); | |
3036 | ||
3037 | if ((pio->io_flags & ZIO_FLAG_GODFATHER) && | |
3038 | (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) { | |
3039 | zio_remove_child(pio, zio, zl); | |
3040 | zio_notify_parent(pio, zio, ZIO_WAIT_DONE); | |
3041 | } | |
3042 | } | |
3043 | ||
d164b209 | 3044 | if ((pio = zio_unique_parent(zio)) != NULL) { |
b128c09f BB |
3045 | /* |
3046 | * We're not a root i/o, so there's nothing to do | |
3047 | * but notify our parent. Don't propagate errors | |
3048 | * upward since we haven't permanently failed yet. | |
3049 | */ | |
9babb374 | 3050 | ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER)); |
b128c09f BB |
3051 | zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE; |
3052 | zio_notify_parent(pio, zio, ZIO_WAIT_DONE); | |
3053 | } else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) { | |
3054 | /* | |
3055 | * We'd fail again if we reexecuted now, so suspend | |
3056 | * until conditions improve (e.g. device comes online). | |
3057 | */ | |
c776b317 | 3058 | zio_suspend(zio->io_spa, zio); |
b128c09f BB |
3059 | } else { |
3060 | /* | |
3061 | * Reexecution is potentially a huge amount of work. | |
3062 | * Hand it off to the otherwise-unused claim taskq. | |
3063 | */ | |
a38718a6 | 3064 | ASSERT(taskq_empty_ent(&zio->io_tqent)); |
5853fe79 | 3065 | taskq_dispatch_ent( |
c776b317 | 3066 | zio->io_spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE], |
a38718a6 GA |
3067 | (task_func_t *)zio_reexecute, zio, 0, |
3068 | &zio->io_tqent); | |
b128c09f BB |
3069 | } |
3070 | return (ZIO_PIPELINE_STOP); | |
34dc7c2f BB |
3071 | } |
3072 | ||
428870ff | 3073 | ASSERT(zio->io_child_count == 0); |
b128c09f BB |
3074 | ASSERT(zio->io_reexecute == 0); |
3075 | ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL)); | |
34dc7c2f | 3076 | |
428870ff BB |
3077 | /* |
3078 | * Report any checksum errors, since the I/O is complete. | |
3079 | */ | |
3080 | while (zio->io_cksum_report != NULL) { | |
3081 | zio_cksum_report_t *zcr = zio->io_cksum_report; | |
3082 | zio->io_cksum_report = zcr->zcr_next; | |
3083 | zcr->zcr_next = NULL; | |
3084 | zcr->zcr_finish(zcr, NULL); | |
3085 | zfs_ereport_free_checksum(zcr); | |
3086 | } | |
3087 | ||
920dd524 ED |
3088 | if (zio->io_flags & ZIO_FLAG_FASTWRITE && zio->io_bp && |
3089 | !BP_IS_HOLE(zio->io_bp)) { | |
3090 | metaslab_fastwrite_unmark(zio->io_spa, zio->io_bp); | |
3091 | } | |
3092 | ||
d164b209 BB |
3093 | /* |
3094 | * It is the responsibility of the done callback to ensure that this | |
3095 | * particular zio is no longer discoverable for adoption, and as | |
3096 | * such, cannot acquire any new parents. | |
3097 | */ | |
b128c09f BB |
3098 | if (zio->io_done) |
3099 | zio->io_done(zio); | |
34dc7c2f | 3100 | |
d164b209 BB |
3101 | mutex_enter(&zio->io_lock); |
3102 | zio->io_state[ZIO_WAIT_DONE] = 1; | |
3103 | mutex_exit(&zio->io_lock); | |
34dc7c2f | 3104 | |
d164b209 BB |
3105 | for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) { |
3106 | zio_link_t *zl = zio->io_walk_link; | |
3107 | pio_next = zio_walk_parents(zio); | |
3108 | zio_remove_child(pio, zio, zl); | |
b128c09f BB |
3109 | zio_notify_parent(pio, zio, ZIO_WAIT_DONE); |
3110 | } | |
34dc7c2f | 3111 | |
b128c09f BB |
3112 | if (zio->io_waiter != NULL) { |
3113 | mutex_enter(&zio->io_lock); | |
3114 | zio->io_executor = NULL; | |
3115 | cv_broadcast(&zio->io_cv); | |
3116 | mutex_exit(&zio->io_lock); | |
3117 | } else { | |
3118 | zio_destroy(zio); | |
3119 | } | |
34dc7c2f | 3120 | |
b128c09f | 3121 | return (ZIO_PIPELINE_STOP); |
34dc7c2f BB |
3122 | } |
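The final branch of zio_done() pairs with a synchronous waiter: clearing io_executor under io_lock and broadcasting io_cv wakes whoever is blocked in zio_wait() on this zio. A simplified sketch of the waiter side of that handshake (a hypothetical helper, not the actual zio_wait() implementation):

/* Hypothetical illustration of the waiter side of the cv handshake. */
static int
example_wait_for_zio(zio_t *zio)
{
	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	return (zio->io_error);
}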
3123 | ||
3124 | /* | |
b128c09f BB |
3125 | * ========================================================================== |
3126 | * I/O pipeline definition | |
3127 | * ========================================================================== | |
34dc7c2f | 3128 | */ |
428870ff | 3129 | static zio_pipe_stage_t *zio_pipeline[] = { |
b128c09f | 3130 | NULL, |
b128c09f | 3131 | zio_read_bp_init, |
428870ff BB |
3132 | zio_free_bp_init, |
3133 | zio_issue_async, | |
b128c09f BB |
3134 | zio_write_bp_init, |
3135 | zio_checksum_generate, | |
428870ff BB |
3136 | zio_ddt_read_start, |
3137 | zio_ddt_read_done, | |
3138 | zio_ddt_write, | |
3139 | zio_ddt_free, | |
b128c09f BB |
3140 | zio_gang_assemble, |
3141 | zio_gang_issue, | |
3142 | zio_dva_allocate, | |
3143 | zio_dva_free, | |
3144 | zio_dva_claim, | |
3145 | zio_ready, | |
3146 | zio_vdev_io_start, | |
3147 | zio_vdev_io_done, | |
3148 | zio_vdev_io_assess, | |
3149 | zio_checksum_verify, | |
3150 | zio_done | |
3151 | }; | |
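A minimal sketch of how such a table of zio_pipe_stage_t functions can be driven (illustrative only; the real zio_execute() elsewhere in this file is more involved, consulting the zio's io_pipeline stage mask and dispatching to taskqs as needed): each stage returns ZIO_PIPELINE_CONTINUE to advance, or ZIO_PIPELINE_STOP to park the zio until something re-executes it.

/* Hypothetical pipeline driver; not the actual zio_execute(). */
static void
example_run_pipeline(zio_t *zio, zio_pipe_stage_t **stages, int nstages)
{
	int s;

	for (s = 1; s < nstages; s++) {		/* stage 0 is the NULL slot */
		if (stages[s](zio) == ZIO_PIPELINE_STOP)
			return;	/* stage will arrange re-execution later */
	}
}

Called with the table above, this would look like example_run_pipeline(zio, zio_pipeline, sizeof (zio_pipeline) / sizeof (zio_pipeline[0])).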
c28b2279 | 3152 | |
9ae529ec CS |
3153 | /* dnp is the dnode for zb1->zb_object */ |
3154 | boolean_t | |
3155 | zbookmark_is_before(const dnode_phys_t *dnp, const zbookmark_t *zb1, | |
3156 | const zbookmark_t *zb2) | |
3157 | { | |
3158 | uint64_t zb1nextL0, zb2thisobj; | |
3159 | ||
3160 | ASSERT(zb1->zb_objset == zb2->zb_objset); | |
3161 | ASSERT(zb2->zb_level == 0); | |
3162 | ||
3163 | /* | |
3164 | * A bookmark in the deadlist is considered to be after | |
3165 | * everything else. | |
3166 | */ | |
3167 | if (zb2->zb_object == DMU_DEADLIST_OBJECT) | |
3168 | return (B_TRUE); | |
3169 | ||
3170 | /* The objset_phys_t isn't before anything. */ | |
3171 | if (dnp == NULL) | |
3172 | return (B_FALSE); | |
3173 | ||
3174 | zb1nextL0 = (zb1->zb_blkid + 1) << | |
3175 | ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)); | |
3176 | ||
3177 | zb2thisobj = zb2->zb_object ? zb2->zb_object : | |
3178 | zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT); | |
3179 | ||
3180 | if (zb1->zb_object == DMU_META_DNODE_OBJECT) { | |
3181 | uint64_t nextobj = zb1nextL0 * | |
3182 | (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT; | |
3183 | return (nextobj <= zb2thisobj); | |
3184 | } | |
3185 | ||
3186 | if (zb1->zb_object < zb2thisobj) | |
3187 | return (B_TRUE); | |
3188 | if (zb1->zb_object > zb2thisobj) | |
3189 | return (B_FALSE); | |
3190 | if (zb2->zb_object == DMU_META_DNODE_OBJECT) | |
3191 | return (B_FALSE); | |
3192 | return (zb1nextL0 <= zb2->zb_blkid); | |
3193 | } | |
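A worked example of the arithmetic above, assuming typical shift values (dn_indblkshift = 14, i.e. 16K indirect blocks, and SPA_BLKPTRSHIFT = 7, i.e. 128-byte block pointers, so each indirect block maps 2^7 = 128 children):

/*
 * zb1: level 1, blkid 3   =>  zb1nextL0 = (3 + 1) << (1 * 7) = 512
 *       (that L1 block covers L0 blkids 384..511)
 * zb2: level 0, blkid 600, same (non-meta-dnode) object
 *       =>  zb2thisobj == zb1->zb_object, so the result is
 *           zb1nextL0 <= zb2->zb_blkid, i.e. 512 <= 600  =>  B_TRUE
 */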
3194 | ||
c28b2279 BB |
3195 | #if defined(_KERNEL) && defined(HAVE_SPL) |
3196 | /* Fault injection */ | |
3197 | EXPORT_SYMBOL(zio_injection_enabled); | |
3198 | EXPORT_SYMBOL(zio_inject_fault); | |
3199 | EXPORT_SYMBOL(zio_inject_list_next); | |
3200 | EXPORT_SYMBOL(zio_clear_fault); | |
3201 | EXPORT_SYMBOL(zio_handle_fault_injection); | |
3202 | EXPORT_SYMBOL(zio_handle_device_injection); | |
3203 | EXPORT_SYMBOL(zio_handle_label_injection); | |
3204 | EXPORT_SYMBOL(zio_priority_table); | |
3205 | EXPORT_SYMBOL(zio_type_name); | |
3206 | ||
3207 | module_param(zio_bulk_flags, int, 0644); | |
3208 | MODULE_PARM_DESC(zio_bulk_flags, "Additional flags to pass to bulk buffers"); | |
a69052be BB |
3209 | |
3210 | module_param(zio_delay_max, int, 0644); | |
c409e464 BB |
3211 | MODULE_PARM_DESC(zio_delay_max, "Max zio millisec delay before posting event"); |
3212 | ||
3213 | module_param(zio_requeue_io_start_cut_in_line, int, 0644); | |
3214 | MODULE_PARM_DESC(zio_requeue_io_start_cut_in_line, "Prioritize requeued I/O"); | |
c28b2279 | 3215 | #endif |
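Because these module_param() declarations use mode 0644, the values can also be inspected and tuned at runtime through sysfs once the module is loaded; for example (hypothetical value), writing 60000 to /sys/module/zfs/parameters/zio_delay_max raises the delay-event threshold to 60 seconds.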