/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
/*
 * ==========================================================================
 * I/O priority table
 * ==========================================================================
 */
uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = {
	0,	/* ZIO_PRIORITY_NOW		*/
	0,	/* ZIO_PRIORITY_SYNC_READ	*/
	0,	/* ZIO_PRIORITY_SYNC_WRITE	*/
	6,	/* ZIO_PRIORITY_ASYNC_READ	*/
	4,	/* ZIO_PRIORITY_ASYNC_WRITE	*/
	4,	/* ZIO_PRIORITY_FREE		*/
	0,	/* ZIO_PRIORITY_CACHE_FILL	*/
	0,	/* ZIO_PRIORITY_LOG_WRITE	*/
	10,	/* ZIO_PRIORITY_RESILVER	*/
	20,	/* ZIO_PRIORITY_SCRUB		*/
};
/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
char *zio_type_name[ZIO_TYPES] = {
	"null", "read", "write", "free", "claim", "ioctl"
};
#define	SYNC_PASS_DEFERRED_FREE	1	/* defer frees after this pass */
#define	SYNC_PASS_DONT_COMPRESS	4	/* don't compress after this pass */
#define	SYNC_PASS_REWRITE	1	/* rewrite new bps after this pass */
/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif
/*
 * An allocating zio is one that either currently has the DVA allocate
 * stage set or will have it later in its lifetime.
 */
#define	IO_IS_ALLOCATING(zio) \
	((zio)->io_orig_pipeline & (1U << ZIO_STAGE_DVA_ALLOCATE))
void
zio_init(void)
{
	size_t c;
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif
	zio_cache = kmem_cache_create("zio_cache",
	    sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	zio_link_cache = kmem_cache_create("zio_link_cache",
	    sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For medium-size buffers, we want a cache
	 * for each quarter-power of 2.  For large buffers, we want
	 * a cache for each multiple of PAGESIZE.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;

		while (p2 & (p2 - 1))
			p2 &= p2 - 1;

		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (P2PHASE(size, PAGESIZE) == 0) {
			align = PAGESIZE;
		} else if (P2PHASE(size, p2 >> 2) == 0) {
			align = p2 >> 2;
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);

			(void) sprintf(name, "zio_data_buf_%lu",
			    (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
			    KMC_NODEBUG);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();
}
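/*
 * Illustrative sketch, not part of the original source: with
 * SPA_MINBLOCKSHIFT == 9, a 12K allocation maps to cache index
 * (12288 - 1) >> 9 == 23, i.e. the "zio_buf_12288" cache.  Any size
 * class without a dedicated cache is back-filled by the loop above to
 * point at the next larger cache that does exist.
 */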
void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_link_cache);
	kmem_cache_destroy(zio_cache);

	zio_inject_fini();
}
/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}
/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump.  (Thus reducing
 * the amount of kernel heap dumped to disk when the kernel panics.)
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}
void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}
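/*
 * Usage sketch (illustrative only): the size argument selects the kmem
 * cache, so callers must free with the same size they allocated, and
 * with the matching metadata/data variant:
 *
 *	void *buf = zio_buf_alloc(SPA_GANGBLOCKSIZE);
 *	...
 *	zio_buf_free(buf, SPA_GANGBLOCKSIZE);
 *
 * Mixing zio_buf_* and zio_data_buf_* on one buffer would free into
 * the wrong cache.
 */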
/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
    zio_transform_func_t *transform)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_orig_data = zio->io_data;
	zt->zt_orig_size = zio->io_size;
	zt->zt_bufsize = bufsize;
	zt->zt_transform = transform;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}
static void
zio_pop_transforms(zio_t *zio)
{
	zio_transform_t *zt;

	while ((zt = zio->io_transform_stack) != NULL) {
		if (zt->zt_transform != NULL)
			zt->zt_transform(zio,
			    zt->zt_orig_data, zt->zt_orig_size);

		if (zt->zt_bufsize != 0)
			zio_buf_free(zio->io_data, zt->zt_bufsize);

		zio->io_data = zt->zt_orig_data;
		zio->io_size = zt->zt_orig_size;
		zio->io_transform_stack = zt->zt_next;

		kmem_free(zt, sizeof (zio_transform_t));
	}
}
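/*
 * Illustrative sketch (assumptions noted): a compressed read pushes a
 * transform so the pipeline reads csize bytes into a scratch buffer
 * and inflates into the caller's buffer on the way back up:
 *
 *	void *cbuf = zio_buf_alloc(csize);
 *	zio_push_transform(zio, cbuf, csize, csize, zio_decompress);
 *
 * zio_pop_transforms() later runs zio_decompress() and, because
 * bufsize is nonzero, frees cbuf and restores io_data/io_size.
 */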
/*
 * ==========================================================================
 * I/O transform callbacks for subblocks and decompression
 * ==========================================================================
 */
static void
zio_subblock(zio_t *zio, void *data, uint64_t size)
{
	ASSERT(zio->io_size > size);

	if (zio->io_type == ZIO_TYPE_READ)
		bcopy(zio->io_data, data, size);
}
static void
zio_decompress(zio_t *zio, void *data, uint64_t size)
{
	if (zio->io_error == 0 &&
	    zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
	    zio->io_data, zio->io_size, data, size) != 0)
		zio->io_error = EIO;
}
/*
 * ==========================================================================
 * I/O parent/child relationships and pipeline interlocks
 * ==========================================================================
 */

/*
 * NOTE - Callers to zio_walk_parents() and zio_walk_children must
 *        continue calling these functions until they return NULL.
 *        Otherwise, the next caller will pick up the list walk in
 *        some indeterminate state.  (Otherwise every caller would
 *        have to pass in a cookie to keep the state represented by
 *        io_walk_link, which gets annoying.)
 */
zio_t *
zio_walk_parents(zio_t *cio)
{
	zio_link_t *zl = cio->io_walk_link;
	list_t *pl = &cio->io_parent_list;

	zl = (zl == NULL) ? list_head(pl) : list_next(pl, zl);
	cio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_child == cio);
	return (zl->zl_parent);
}
zio_t *
zio_walk_children(zio_t *pio)
{
	zio_link_t *zl = pio->io_walk_link;
	list_t *cl = &pio->io_child_list;

	zl = (zl == NULL) ? list_head(cl) : list_next(cl, zl);
	pio->io_walk_link = zl;

	if (zl == NULL)
		return (NULL);

	ASSERT(zl->zl_parent == pio);
	return (zl->zl_child);
}
zio_t *
zio_unique_parent(zio_t *cio)
{
	zio_t *pio = zio_walk_parents(cio);

	VERIFY(zio_walk_parents(cio) == NULL);
	return (pio);
}
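/*
 * Usage sketch (illustrative only): the walkers are cursor-based, so a
 * full traversal looks like
 *
 *	zio_t *pio;
 *	while ((pio = zio_walk_parents(cio)) != NULL)
 *		...;
 *
 * and, per the NOTE above, must always run to the NULL return so that
 * io_walk_link is reset for the next caller.
 */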
void
zio_add_child(zio_t *pio, zio_t *cio)
{
	zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);

	/*
	 * Logical I/Os can have logical, gang, or vdev children.
	 * Gang I/Os can have gang or vdev children.
	 * Vdev I/Os can only have vdev children.
	 * The following ASSERT captures all of these constraints.
	 */
	ASSERT(cio->io_child_type <= pio->io_child_type);

	zl->zl_parent = pio;
	zl->zl_child = cio;

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);

	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_children[cio->io_child_type][w] += !cio->io_state[w];

	list_insert_head(&pio->io_child_list, zl);
	list_insert_head(&cio->io_parent_list, zl);

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);
}
static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
	ASSERT(zl->zl_parent == pio);
	ASSERT(zl->zl_child == cio);

	mutex_enter(&cio->io_lock);
	mutex_enter(&pio->io_lock);

	list_remove(&pio->io_child_list, zl);
	list_remove(&cio->io_parent_list, zl);

	mutex_exit(&pio->io_lock);
	mutex_exit(&cio->io_lock);

	kmem_cache_free(zio_link_cache, zl);
}
static boolean_t
zio_wait_for_children(zio_t *zio, enum zio_child child, enum zio_wait_type wait)
{
	uint64_t *countp = &zio->io_children[child][wait];
	boolean_t waiting = B_FALSE;

	mutex_enter(&zio->io_lock);
	ASSERT(zio->io_stall == NULL);
	if (*countp != 0) {
		zio->io_stage--;
		zio->io_stall = countp;
		waiting = B_TRUE;
	}
	mutex_exit(&zio->io_lock);

	return (waiting);
}
static void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
	uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
	int *errorp = &pio->io_child_error[zio->io_child_type];

	mutex_enter(&pio->io_lock);
	if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		*errorp = zio_worst_error(*errorp, zio->io_error);
	pio->io_reexecute |= zio->io_reexecute;
	ASSERT3U(*countp, >, 0);
	if (--*countp == 0 && pio->io_stall == countp) {
		pio->io_stall = NULL;
		mutex_exit(&pio->io_lock);
		zio_execute(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}
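/*
 * Illustrative note (not in the original source): these two functions
 * form a handshake.  A pipeline stage that must wait does, e.g.:
 *
 *	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE))
 *		return (ZIO_PIPELINE_STOP);
 *
 * which backs io_stage up by one and records the count it is stalled
 * on; the last child to complete observes io_stall == countp and
 * re-dispatches the parent via zio_execute(), repeating the stage.
 */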
static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
	if (zio->io_child_error[c] != 0 && zio->io_error == 0)
		zio->io_error = zio->io_child_error[c];
}
/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free, etc)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, int priority, int flags, vdev_t *vd, uint64_t offset,
    const zbookmark_t *zb, uint8_t stage, uint32_t pipeline)
{
	zio_t *zio;

	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
	ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
	ASSERT(vd || stage == ZIO_STAGE_OPEN);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));

	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);

	list_create(&zio->io_parent_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_parent_node));
	list_create(&zio->io_child_list, sizeof (zio_link_t),
	    offsetof(zio_link_t, zl_child_node));

	if (vd != NULL)
		zio->io_child_type = ZIO_CHILD_VDEV;
	else if (flags & ZIO_FLAG_GANG_CHILD)
		zio->io_child_type = ZIO_CHILD_GANG;
	else
		zio->io_child_type = ZIO_CHILD_LOGICAL;

	if (bp != NULL) {
		zio->io_bp = bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
		if (type != ZIO_TYPE_WRITE)
			zio->io_bp = &zio->io_bp_copy;	/* so caller can free */
		if (zio->io_child_type == ZIO_CHILD_LOGICAL)
			zio->io_logical = zio;
		if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
			pipeline |= ZIO_GANG_STAGES;
	}

	zio->io_spa = spa;
	zio->io_txg = txg;
	zio->io_data = data;
	zio->io_size = size;
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_vd = vd;
	zio->io_offset = offset;
	zio->io_orig_flags = zio->io_flags = flags;
	zio->io_orig_stage = zio->io_stage = stage;
	zio->io_orig_pipeline = zio->io_pipeline = pipeline;

	zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
	zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);

	if (zb != NULL)
		zio->io_bookmark = *zb;

	if (pio != NULL) {
		if (zio->io_logical == NULL)
			zio->io_logical = pio->io_logical;
		if (zio->io_child_type == ZIO_CHILD_GANG)
			zio->io_gang_leader = pio->io_gang_leader;
		zio_add_child(pio, zio);
	}

	return (zio);
}
static void
zio_destroy(zio_t *zio)
{
	list_destroy(&zio->io_parent_list);
	list_destroy(&zio->io_child_list);
	mutex_destroy(&zio->io_lock);
	cv_destroy(&zio->io_cv);
	kmem_cache_free(zio_cache, zio);
}
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
    void *private, int flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
	    ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);

	return (zio);
}
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, int flags)
{
	return (zio_null(NULL, spa, NULL, done, private, flags));
}
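/*
 * Usage sketch (illustrative only; the flag set and bookmark are
 * assumptions): a root zio collects a batch of children so the caller
 * can wait on all of them at once:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, buf, size, NULL, NULL,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb));
 *	error = zio_wait(rio);
 */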
zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    int priority, int flags, const zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, bp->blk_birth, (blkptr_t *)bp,
	    data, size, done, private,
	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_READ_PIPELINE);

	return (zio);
}
void
zio_skip_write(zio_t *zio)
{
	ASSERT(zio->io_type == ZIO_TYPE_WRITE);
	ASSERT(zio->io_stage == ZIO_STAGE_READY);
	ASSERT(!BP_IS_GANG(zio->io_bp));

	zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
}
zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, zio_prop_t *zp,
    zio_done_func_t *ready, zio_done_func_t *done, void *private,
    int priority, int flags, const zbookmark_t *zb)
{
	zio_t *zio;

	ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
	    zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
	    zp->zp_compress >= ZIO_COMPRESS_OFF &&
	    zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
	    zp->zp_type < DMU_OT_NUMTYPES &&
	    zp->zp_level < 32 &&
	    zp->zp_ndvas > 0 &&
	    zp->zp_ndvas <= spa_max_replication(spa));
	ASSERT(ready != NULL);

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;

	return (zio);
}
zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private, int priority,
    int flags, zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	return (zio);
}
*pio
, spa_t
*spa
, uint64_t txg
, blkptr_t
*bp
,
628 zio_done_func_t
*done
, void *private, int flags
)
632 ASSERT(!BP_IS_HOLE(bp
));
634 if (bp
->blk_fill
== BLK_FILL_ALREADY_FREED
)
635 return (zio_null(pio
, spa
, NULL
, NULL
, NULL
, flags
));
637 if (txg
== spa
->spa_syncing_txg
&&
638 spa_sync_pass(spa
) > SYNC_PASS_DEFERRED_FREE
) {
639 bplist_enqueue_deferred(&spa
->spa_sync_bplist
, bp
);
640 return (zio_null(pio
, spa
, NULL
, NULL
, NULL
, flags
));
643 zio
= zio_create(pio
, spa
, txg
, bp
, NULL
, BP_GET_PSIZE(bp
),
644 done
, private, ZIO_TYPE_FREE
, ZIO_PRIORITY_FREE
, flags
,
645 NULL
, 0, NULL
, ZIO_STAGE_OPEN
, ZIO_FREE_PIPELINE
);
zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private, int flags)
{
	zio_t *zio;

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
	    spa_first_txg(spa));
	ASSERT3U(spa_first_txg(spa), <=, txg);

	zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
	    done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, flags,
	    NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	return (zio);
}
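/*
 * Illustrative sketch (hedged; the calling context is an assumption
 * here): during pool open, the ZIL's log-walk claims each immediate
 * write block before normal allocation begins, roughly:
 *
 *	(void) zio_wait(zio_claim(NULL, spa, first_txg, bp,
 *	    NULL, NULL, 0));
 *
 * so that every such block is marked in use in the first txg.
 */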
zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, int priority, int flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
		    ZIO_TYPE_IOCTL, priority, flags, vd, 0, NULL,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, priority, flags));
	}

	return (zio);
}
*pio
, vdev_t
*vd
, uint64_t offset
, uint64_t size
,
703 void *data
, int checksum
, zio_done_func_t
*done
, void *private,
704 int priority
, int flags
, boolean_t labels
)
708 ASSERT(vd
->vdev_children
== 0);
709 ASSERT(!labels
|| offset
+ size
<= VDEV_LABEL_START_SIZE
||
710 offset
>= vd
->vdev_psize
- VDEV_LABEL_END_SIZE
);
711 ASSERT3U(offset
+ size
, <=, vd
->vdev_psize
);
713 zio
= zio_create(pio
, vd
->vdev_spa
, 0, NULL
, data
, size
, done
, private,
714 ZIO_TYPE_READ
, priority
, flags
, vd
, offset
, NULL
,
715 ZIO_STAGE_OPEN
, ZIO_READ_PHYS_PIPELINE
);
717 zio
->io_prop
.zp_checksum
= checksum
;
zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, int flags, boolean_t labels)
{
	zio_t *zio;

	ASSERT(vd->vdev_children == 0);
	ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, done,
	    private, ZIO_TYPE_WRITE, priority, flags, vd, offset, NULL,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_prop.zp_checksum = checksum;

	if (zio_checksum_table[checksum].ci_zbt) {
		/*
		 * zbt checksums are necessarily destructive -- they modify
		 * the end of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places in parallel.
		 */
		void *wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size, NULL);
	}

	return (zio);
}
/*
 * Create a child I/O to do some work for us.
 */
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, int priority, int flags,
    zio_done_func_t *done, void *private)
{
	uint32_t pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *zio;

	ASSERT(vd->vdev_parent ==
	    (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= 1U << ZIO_STAGE_CHECKSUM_VERIFY;
		pio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY);
	}

	if (vd->vdev_children == 0)
		offset += VDEV_LABEL_START_SIZE;

	zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size,
	    done, private, type, priority,
	    (pio->io_flags & ZIO_FLAG_VDEV_INHERIT) |
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | flags,
	    vd, offset, &pio->io_bookmark,
	    ZIO_STAGE_VDEV_IO_START - 1, pipeline);

	return (zio);
}
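/*
 * Illustrative note (not in the original source): because the child
 * starts at ZIO_STAGE_VDEV_IO_START - 1 with ZIO_VDEV_CHILD_PIPELINE,
 * it skips the logical stages (compression, DVA allocation) entirely
 * and goes straight to device I/O; the checksum-verify stage migrates
 * from parent to child only when the child has a bp to verify against.
 */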
zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, void *data, uint64_t size,
    int type, int priority, int flags, zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
	    data, size, done, private, type, priority,
	    flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY,
	    vd, offset, NULL,
	    ZIO_STAGE_VDEV_IO_START - 1, ZIO_VDEV_CHILD_PIPELINE);

	return (zio);
}
*zio
, vdev_t
*vd
)
813 zio_nowait(zio_ioctl(zio
, zio
->io_spa
, vd
, DKIOCFLUSHWRITECACHE
,
814 NULL
, NULL
, ZIO_PRIORITY_NOW
,
815 ZIO_FLAG_CANFAIL
| ZIO_FLAG_DONT_PROPAGATE
| ZIO_FLAG_DONT_RETRY
));
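/*
 * Usage sketch (illustrative only; the caller pattern is an assumption
 * here): a commit path flushes every vdev it has written to before
 * declaring the data durable, roughly:
 *
 *	zio_t *root = zio_root(spa, NULL, NULL, 0);
 *	for each vdev vd written in this commit
 *		zio_flush(root, vd);
 *	(void) zio_wait(root);
 */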
/*
 * ==========================================================================
 * Prepare to read and write logical blocks
 * ==========================================================================
 */
static int
zio_read_bp_init(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    !(zio->io_flags & ZIO_FLAG_RAW)) {
		uint64_t csize = BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(csize);

		zio_push_transform(zio, cbuf, csize, csize, zio_decompress);
	}

	if (!dmu_ot[BP_GET_TYPE(bp)].ot_metadata && BP_GET_LEVEL(bp) == 0)
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;

	return (ZIO_PIPELINE_CONTINUE);
}
*zio
)
847 zio_prop_t
*zp
= &zio
->io_prop
;
848 int compress
= zp
->zp_compress
;
849 blkptr_t
*bp
= zio
->io_bp
;
851 uint64_t lsize
= zio
->io_size
;
852 uint64_t csize
= lsize
;
853 uint64_t cbufsize
= 0;
857 * If our children haven't all reached the ready stage,
858 * wait for them and then repeat this pipeline stage.
860 if (zio_wait_for_children(zio
, ZIO_CHILD_GANG
, ZIO_WAIT_READY
) ||
861 zio_wait_for_children(zio
, ZIO_CHILD_LOGICAL
, ZIO_WAIT_READY
))
862 return (ZIO_PIPELINE_STOP
);
864 if (!IO_IS_ALLOCATING(zio
))
865 return (ZIO_PIPELINE_CONTINUE
);
867 ASSERT(compress
!= ZIO_COMPRESS_INHERIT
);
869 if (bp
->blk_birth
== zio
->io_txg
) {
871 * We're rewriting an existing block, which means we're
872 * working on behalf of spa_sync(). For spa_sync() to
873 * converge, it must eventually be the case that we don't
874 * have to allocate new blocks. But compression changes
875 * the blocksize, which forces a reallocate, and makes
876 * convergence take longer. Therefore, after the first
877 * few passes, stop compressing to ensure convergence.
879 pass
= spa_sync_pass(zio
->io_spa
);
881 if (pass
> SYNC_PASS_DONT_COMPRESS
)
882 compress
= ZIO_COMPRESS_OFF
;
884 /* Make sure someone doesn't change their mind on overwrites */
885 ASSERT(MIN(zp
->zp_ndvas
+ BP_IS_GANG(bp
),
886 spa_max_replication(zio
->io_spa
)) == BP_GET_NDVAS(bp
));
889 if (compress
!= ZIO_COMPRESS_OFF
) {
890 if (!zio_compress_data(compress
, zio
->io_data
, zio
->io_size
,
891 &cbuf
, &csize
, &cbufsize
)) {
892 compress
= ZIO_COMPRESS_OFF
;
893 } else if (csize
!= 0) {
894 zio_push_transform(zio
, cbuf
, csize
, cbufsize
, NULL
);
899 * The final pass of spa_sync() must be all rewrites, but the first
900 * few passes offer a trade-off: allocating blocks defers convergence,
901 * but newly allocated blocks are sequential, so they can be written
902 * to disk faster. Therefore, we allow the first few passes of
903 * spa_sync() to allocate new blocks, but force rewrites after that.
904 * There should only be a handful of blocks after pass 1 in any case.
906 if (bp
->blk_birth
== zio
->io_txg
&& BP_GET_PSIZE(bp
) == csize
&&
907 pass
> SYNC_PASS_REWRITE
) {
909 uint32_t gang_stages
= zio
->io_pipeline
& ZIO_GANG_STAGES
;
910 zio
->io_pipeline
= ZIO_REWRITE_PIPELINE
| gang_stages
;
911 zio
->io_flags
|= ZIO_FLAG_IO_REWRITE
;
914 zio
->io_pipeline
= ZIO_WRITE_PIPELINE
;
918 zio
->io_pipeline
= ZIO_INTERLOCK_PIPELINE
;
920 ASSERT(zp
->zp_checksum
!= ZIO_CHECKSUM_GANG_HEADER
);
921 BP_SET_LSIZE(bp
, lsize
);
922 BP_SET_PSIZE(bp
, csize
);
923 BP_SET_COMPRESS(bp
, compress
);
924 BP_SET_CHECKSUM(bp
, zp
->zp_checksum
);
925 BP_SET_TYPE(bp
, zp
->zp_type
);
926 BP_SET_LEVEL(bp
, zp
->zp_level
);
927 BP_SET_BYTEORDER(bp
, ZFS_HOST_BYTEORDER
);
930 return (ZIO_PIPELINE_CONTINUE
);
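/*
 * Illustrative note (not in the original source): with
 * SYNC_PASS_REWRITE == 1 and SYNC_PASS_DONT_COMPRESS == 4, pass 2 of
 * spa_sync() already prefers rewriting a same-size bp in place, and
 * from pass 5 onward blocks are written uncompressed so their size
 * (and thus their bp) stops changing -- which is what lets sync
 * converge.
 */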
/*
 * ==========================================================================
 * Execute the I/O pipeline
 * ==========================================================================
 */
static void
zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q)
{
	zio_type_t t = zio->io_type;

	/*
	 * If we're a config writer or a probe, the normal issue and
	 * interrupt threads may all be blocked waiting for the config lock.
	 * In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
	 */
	if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
		t = ZIO_TYPE_NULL;

	/*
	 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
	 */
	if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
		t = ZIO_TYPE_NULL;

	(void) taskq_dispatch(zio->io_spa->spa_zio_taskq[t][q],
	    (task_func_t *)zio_execute, zio, TQ_SLEEP);
}
static boolean_t
zio_taskq_member(zio_t *zio, enum zio_taskq_type q)
{
	kthread_t *executor = zio->io_executor;
	spa_t *spa = zio->io_spa;

	for (zio_type_t t = 0; t < ZIO_TYPES; t++)
		if (taskq_member(spa->spa_zio_taskq[t][q], executor))
			return (B_TRUE);

	return (B_FALSE);
}
*zio
)
978 zio_taskq_dispatch(zio
, ZIO_TASKQ_ISSUE
);
980 return (ZIO_PIPELINE_STOP
);
void
zio_interrupt(zio_t *zio)
{
	zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT);
}
/*
 * Execute the I/O pipeline until one of the following occurs:
 * (1) the I/O completes; (2) the pipeline stalls waiting for
 * dependent child I/Os; (3) the I/O issues, so we're waiting
 * for an I/O completion interrupt; (4) the I/O is delegated by
 * vdev-level caching or aggregation; (5) the I/O is deferred
 * due to vdev-level queueing; (6) the I/O is handed off to
 * another thread.  In all cases, the pipeline stops whenever
 * there's no CPU work; it never burns a thread in cv_wait().
 *
 * There's no locking on io_stage because there's no legitimate way
 * for multiple threads to be attempting to process the same I/O.
 */
static zio_pipe_stage_t *zio_pipeline[ZIO_STAGES];

void
zio_execute(zio_t *zio)
{
	zio->io_executor = curthread;

	while (zio->io_stage < ZIO_STAGE_DONE) {
		uint32_t pipeline = zio->io_pipeline;
		zio_stage_t stage = zio->io_stage;
		int rv;

		ASSERT(!MUTEX_HELD(&zio->io_lock));

		while (((1U << ++stage) & pipeline) == 0)
			continue;

		ASSERT(stage <= ZIO_STAGE_DONE);
		ASSERT(zio->io_stall == NULL);

		/*
		 * If we are in interrupt context and this pipeline stage
		 * will grab a config lock that is held across I/O,
		 * issue async to avoid deadlock.
		 */
		if (((1U << stage) & ZIO_CONFIG_LOCK_BLOCKING_STAGES) &&
		    zio->io_vd == NULL &&
		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE);
			return;
		}

		zio->io_stage = stage;
		rv = zio_pipeline[stage](zio);

		if (rv == ZIO_PIPELINE_STOP)
			return;

		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
	}
}
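/*
 * Illustrative note (not in the original source): io_pipeline is a
 * bitmask of stages and io_stage is the index of the last stage run,
 * so advancing is just a scan for the next set bit.  For example, a
 * pipeline containing only the interlock stages steps from
 * ZIO_STAGE_OPEN directly to ZIO_STAGE_READY without ever invoking
 * the skipped stage functions.
 */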
/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
	ASSERT(zio->io_executor == NULL);

	zio->io_waiter = curthread;

	zio_execute(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_executor != NULL)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	zio_destroy(zio);

	return (error);
}
void
zio_nowait(zio_t *zio)
{
	ASSERT(zio->io_executor == NULL);

	if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
	    zio_unique_parent(zio) == NULL) {
		/*
		 * This is a logical async I/O with no parent to wait for it.
		 * We add it to the spa_async_root_zio "Godfather" I/O which
		 * will ensure they complete prior to unloading the pool.
		 */
		spa_t *spa = zio->io_spa;

		zio_add_child(spa->spa_async_zio_root, zio);
	}

	zio_execute(zio);
}
/*
 * ==========================================================================
 * Reexecute or suspend/resume failed I/O
 * ==========================================================================
 */
static void
zio_reexecute(zio_t *pio)
{
	zio_t *cio, *cio_next;

	ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
	ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
	ASSERT(pio->io_gang_leader == NULL);
	ASSERT(pio->io_gang_tree == NULL);

	pio->io_flags = pio->io_orig_flags;
	pio->io_stage = pio->io_orig_stage;
	pio->io_pipeline = pio->io_orig_pipeline;
	pio->io_reexecute = 0;
	pio->io_error = 0;
	for (int w = 0; w < ZIO_WAIT_TYPES; w++)
		pio->io_state[w] = 0;
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		pio->io_child_error[c] = 0;

	if (IO_IS_ALLOCATING(pio)) {
		/*
		 * Remember the failed bp so that the io_ready() callback
		 * can update its accounting upon reexecution.  The block
		 * was already freed in zio_done(); we indicate this with
		 * a fill count of -1 so that zio_free() knows to skip it.
		 */
		blkptr_t *bp = pio->io_bp;
		ASSERT(bp->blk_birth == 0 || bp->blk_birth == pio->io_txg);
		bp->blk_fill = BLK_FILL_ALREADY_FREED;
		pio->io_bp_orig = *bp;
		BP_ZERO(bp);
	}

	/*
	 * As we reexecute pio's children, new children could be created.
	 * New children go to the head of pio's io_child_list, however,
	 * so we will (correctly) not reexecute them.  The key is that
	 * the remainder of pio's io_child_list, from 'cio_next' onward,
	 * cannot be affected by any side effects of reexecuting 'cio'.
	 */
	for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) {
		cio_next = zio_walk_children(pio);
		mutex_enter(&pio->io_lock);
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			pio->io_children[cio->io_child_type][w]++;
		mutex_exit(&pio->io_lock);
		zio_reexecute(cio);
	}

	/*
	 * Now that all children have been reexecuted, execute the parent.
	 * We don't reexecute "The Godfather" I/O here as it's the
	 * responsibility of the caller to wait on him.
	 */
	if (!(pio->io_flags & ZIO_FLAG_GODFATHER))
		zio_execute(pio);
}
void
zio_suspend(spa_t *spa, zio_t *zio)
{
	if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
		fm_panic("Pool '%s' has encountered an uncorrectable I/O "
		    "failure and the failure mode property for this pool "
		    "is set to panic.", spa_name(spa));

	zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL, NULL, 0, 0);

	mutex_enter(&spa->spa_suspend_lock);

	if (spa->spa_suspend_zio_root == NULL)
		spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
		    ZIO_FLAG_GODFATHER);

	spa->spa_suspended = B_TRUE;

	if (zio != NULL) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
		ASSERT(zio != spa->spa_suspend_zio_root);
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
		ASSERT(zio_unique_parent(zio) == NULL);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio_add_child(spa->spa_suspend_zio_root, zio);
	}

	mutex_exit(&spa->spa_suspend_lock);
}
int
zio_resume(spa_t *spa)
{
	zio_t *pio;

	/*
	 * Reexecute all previously suspended i/o.
	 */
	mutex_enter(&spa->spa_suspend_lock);
	spa->spa_suspended = B_FALSE;
	cv_broadcast(&spa->spa_suspend_cv);
	pio = spa->spa_suspend_zio_root;
	spa->spa_suspend_zio_root = NULL;
	mutex_exit(&spa->spa_suspend_lock);

	if (pio == NULL)
		return (0);

	zio_reexecute(pio);
	return (zio_wait(pio));
}
void
zio_resume_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_suspend_lock);
	while (spa_suspended(spa))
		cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
	mutex_exit(&spa->spa_suspend_lock);
}
/*
 * ==========================================================================
 * Gang blocks.
 *
 * A gang block is a collection of small blocks that looks to the DMU
 * like one large block.  When zio_dva_allocate() cannot find a block
 * of the requested size, due to either severe fragmentation or the pool
 * being nearly full, it calls zio_write_gang_block() to construct the
 * block from smaller fragments.
 *
 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
 * three (SPA_GBH_NBLKPTRS) gang members.  The gang header is just like
 * an indirect block: it's an array of block pointers.  It consumes
 * only one sector and hence is allocatable regardless of fragmentation.
 * The gang header's bps point to its gang members, which hold the data.
 *
 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
 * as the verifier to ensure uniqueness of the SHA256 checksum.
 * Critically, the gang block bp's blk_cksum is the checksum of the data,
 * not the gang header.  This ensures that data block signatures (needed for
 * deduplication) are independent of how the block is physically stored.
 *
 * Gang blocks can be nested: a gang member may itself be a gang block.
 * Thus every gang block is a tree in which root and all interior nodes are
 * gang headers, and the leaves are normal blocks that contain user data.
 * The root of the gang tree is called the gang leader.
 *
 * To perform any operation (read, rewrite, free, claim) on a gang block,
 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
 * in the io_gang_tree field of the original logical i/o by recursively
 * reading the gang leader and all gang headers below it.  This yields
 * an in-core tree containing the contents of every gang header and the
 * bps for every constituent of the gang block.
 *
 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
 * and invokes a callback on each bp.  To free a gang block, zio_gang_issue()
 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each
 * bp.  zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
 * headers, since we already have those in io_gang_tree.  zio_rewrite_gang()
 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
 * of the gang header plus zio_checksum_compute() of the data to update the
 * gang header's blk_cksum as described above.
 *
 * The two-phase assemble/issue model solves the problem of partial failure --
 * what if you'd freed part of a gang block but then couldn't read the
 * gang header for another part?  Assembling the entire gang tree first
 * ensures that all the necessary gang header I/O has succeeded before
 * starting the actual work of free, claim, or write.  Once the gang tree
 * is assembled, free and claim are in-memory operations that cannot fail.
 *
 * In the event that a gang write fails, zio_dva_unallocate() walks the
 * gang tree to immediately free (i.e. insert back into the space map)
 * everything we've allocated.  This ensures that we don't get ENOSPC
 * errors during repeated suspend/resume cycles due to a flaky device.
 *
 * Gang rewrites only happen during sync-to-convergence.  If we can't assemble
 * the gang tree, we won't modify the block, so we can safely defer the free
 * (knowing that the block is still intact).  If we *can* assemble the gang
 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will
 * free each constituent bp and we can allocate a new block on the next
 * sync pass.
 *
 * In all cases, the gang tree allows complete recovery from partial failure.
 * ==========================================================================
 */
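/*
 * Illustrative layout (a sketch, assuming the very first allocation
 * attempt gangs and SPA_GBH_NBLKPTRS == 3): a 128K write split by
 * zio_write_gang_block() becomes a one-sector gang header whose three
 * bps point at members of P2ROUNDUP(resid / (3 - g), SPA_MINBLOCKSIZE)
 * bytes, i.e. roughly equal thirds (44032 + 43520 + 43520 bytes).
 * Any member that still cannot be allocated gangs again, nesting the
 * tree one level deeper.
 */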
static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	if (gn != NULL)
		return (pio);

	return (zio_read(pio, pio->io_spa, bp, data, BP_GET_PSIZE(bp),
	    NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
	    &pio->io_bookmark));
}
static zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	zio_t *zio;

	if (gn != NULL) {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    gn->gn_gbh, SPA_GANGBLOCKSIZE, NULL, NULL,
		    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
		    &pio->io_bookmark);
		/*
		 * As we rewrite each gang header, the pipeline will compute
		 * a new gang block header checksum for it; but no one will
		 * compute a new data checksum, so we do that here.  The one
		 * exception is the gang leader: the pipeline already computed
		 * its data checksum because that stage precedes gang assembly.
		 * (Presently, nothing actually uses interior data checksums;
		 * this is just good hygiene.)
		 */
		if (gn != pio->io_gang_leader->io_gang_tree) {
			zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
			    data, BP_GET_PSIZE(bp));
		}
	} else {
		zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
		    data, BP_GET_PSIZE(bp), NULL, NULL, pio->io_priority,
		    ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
	}

	return (zio);
}
/* ARGSUSED */
static zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_free(pio, pio->io_spa, pio->io_txg, bp,
	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}
/* ARGSUSED */
static zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, void *data)
{
	return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
	    NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}
static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
	NULL,
	zio_read_gang,
	zio_rewrite_gang,
	zio_free_gang,
	zio_claim_gang,
	NULL
};
static void zio_gang_tree_assemble_done(zio_t *zio);
static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
	zio_gang_node_t *gn;

	ASSERT(*gnpp == NULL);

	gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
	gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
	*gnpp = gn;

	return (gn);
}
**gnpp
)
1371 zio_gang_node_t
*gn
= *gnpp
;
1373 for (int g
= 0; g
< SPA_GBH_NBLKPTRS
; g
++)
1374 ASSERT(gn
->gn_child
[g
] == NULL
);
1376 zio_buf_free(gn
->gn_gbh
, SPA_GANGBLOCKSIZE
);
1377 kmem_free(gn
, sizeof (*gn
));
1382 zio_gang_tree_free(zio_gang_node_t
**gnpp
)
1384 zio_gang_node_t
*gn
= *gnpp
;
1389 for (int g
= 0; g
< SPA_GBH_NBLKPTRS
; g
++)
1390 zio_gang_tree_free(&gn
->gn_child
[g
]);
1392 zio_gang_node_free(gnpp
);
1396 zio_gang_tree_assemble(zio_t
*gio
, blkptr_t
*bp
, zio_gang_node_t
**gnpp
)
1398 zio_gang_node_t
*gn
= zio_gang_node_alloc(gnpp
);
1400 ASSERT(gio
->io_gang_leader
== gio
);
1401 ASSERT(BP_IS_GANG(bp
));
1403 zio_nowait(zio_read(gio
, gio
->io_spa
, bp
, gn
->gn_gbh
,
1404 SPA_GANGBLOCKSIZE
, zio_gang_tree_assemble_done
, gn
,
1405 gio
->io_priority
, ZIO_GANG_CHILD_FLAGS(gio
), &gio
->io_bookmark
));
1409 zio_gang_tree_assemble_done(zio_t
*zio
)
1411 zio_t
*gio
= zio
->io_gang_leader
;
1412 zio_gang_node_t
*gn
= zio
->io_private
;
1413 blkptr_t
*bp
= zio
->io_bp
;
1415 ASSERT(gio
== zio_unique_parent(zio
));
1416 ASSERT(zio_walk_children(zio
) == NULL
);
1421 if (BP_SHOULD_BYTESWAP(bp
))
1422 byteswap_uint64_array(zio
->io_data
, zio
->io_size
);
1424 ASSERT(zio
->io_data
== gn
->gn_gbh
);
1425 ASSERT(zio
->io_size
== SPA_GANGBLOCKSIZE
);
1426 ASSERT(gn
->gn_gbh
->zg_tail
.zbt_magic
== ZBT_MAGIC
);
1428 for (int g
= 0; g
< SPA_GBH_NBLKPTRS
; g
++) {
1429 blkptr_t
*gbp
= &gn
->gn_gbh
->zg_blkptr
[g
];
1430 if (!BP_IS_GANG(gbp
))
1432 zio_gang_tree_assemble(gio
, gbp
, &gn
->gn_child
[g
]);
static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data)
{
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;

	ASSERT(BP_IS_GANG(bp) == !!gn);
	ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
	ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) ||
	    gn == gio->io_gang_tree);

	/*
	 * If you're a gang header, your data is in gn->gn_gbh.
	 * If you're a gang member, your data is in 'data' and gn == NULL.
	 */
	zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data);

	if (gn != NULL) {
		ASSERT(gn->gn_gbh->zg_tail.zbt_magic == ZBT_MAGIC);

		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
			if (BP_IS_HOLE(gbp))
				continue;
			zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data);
			data = (char *)data + BP_GET_PSIZE(gbp);
		}
	}

	if (gn == gio->io_gang_tree)
		ASSERT3P((char *)gio->io_data + gio->io_size, ==, data);

	if (zio != pio)
		zio_nowait(zio);
}
static int
zio_gang_assemble(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	zio->io_gang_leader = zio;

	zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);

	return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_gang_issue(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
	ASSERT(zio->io_child_type > ZIO_CHILD_GANG);

	if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
		zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_data);
	else
		zio_gang_tree_free(&zio->io_gang_tree);

	zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}
static void
zio_write_gang_member_ready(zio_t *zio)
{
	zio_t *pio = zio_unique_parent(zio);
	zio_t *gio = zio->io_gang_leader;
	dva_t *cdva = zio->io_bp->blk_dva;
	dva_t *pdva = pio->io_bp->blk_dva;
	uint64_t asize;

	if (BP_IS_HOLE(zio->io_bp))
		return;

	ASSERT(BP_IS_HOLE(&zio->io_bp_orig));

	ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
	ASSERT3U(zio->io_prop.zp_ndvas, ==, gio->io_prop.zp_ndvas);
	ASSERT3U(zio->io_prop.zp_ndvas, <=, BP_GET_NDVAS(zio->io_bp));
	ASSERT3U(pio->io_prop.zp_ndvas, <=, BP_GET_NDVAS(pio->io_bp));
	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));

	mutex_enter(&pio->io_lock);
	for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
		ASSERT(DVA_GET_GANG(&pdva[d]));
		asize = DVA_GET_ASIZE(&pdva[d]);
		asize += DVA_GET_ASIZE(&cdva[d]);
		DVA_SET_ASIZE(&pdva[d], asize);
	}
	mutex_exit(&pio->io_lock);
}
static int
zio_write_gang_block(zio_t *pio)
{
	spa_t *spa = pio->io_spa;
	blkptr_t *bp = pio->io_bp;
	zio_t *gio = pio->io_gang_leader;
	zio_t *zio;
	zio_gang_node_t *gn, **gnpp;
	zio_gbh_phys_t *gbh;
	uint64_t txg = pio->io_txg;
	uint64_t resid = pio->io_size;
	uint64_t lsize;
	int ndvas = gio->io_prop.zp_ndvas;
	int gbh_ndvas = MIN(ndvas + 1, spa_max_replication(spa));
	zio_prop_t zp;
	int error;

	error = metaslab_alloc(spa, spa->spa_normal_class, SPA_GANGBLOCKSIZE,
	    bp, gbh_ndvas, txg, pio == gio ? NULL : gio->io_bp,
	    METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER);
	if (error) {
		pio->io_error = error;
		return (ZIO_PIPELINE_CONTINUE);
	}

	if (pio == gio) {
		gnpp = &gio->io_gang_tree;
	} else {
		gnpp = pio->io_private;
		ASSERT(pio->io_ready == zio_write_gang_member_ready);
	}

	gn = zio_gang_node_alloc(gnpp);
	gbh = gn->gn_gbh;
	bzero(gbh, SPA_GANGBLOCKSIZE);

	/*
	 * Create the gang header.
	 */
	zio = zio_rewrite(pio, spa, txg, bp, gbh, SPA_GANGBLOCKSIZE,
	    NULL, NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
	    &pio->io_bookmark);

	/*
	 * Create and nowait the gang children.
	 */
	for (int g = 0; resid != 0; resid -= lsize, g++) {
		lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
		    SPA_MINBLOCKSIZE);
		ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);

		zp.zp_checksum = gio->io_prop.zp_checksum;
		zp.zp_compress = ZIO_COMPRESS_OFF;
		zp.zp_type = DMU_OT_NONE;
		zp.zp_level = 0;
		zp.zp_ndvas = gio->io_prop.zp_ndvas;

		zio_nowait(zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
		    (char *)pio->io_data + (pio->io_size - resid), lsize, &zp,
		    zio_write_gang_member_ready, NULL, &gn->gn_child[g],
		    pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
		    &pio->io_bookmark));
	}

	/*
	 * Set pio's pipeline to just wait for zio to finish.
	 */
	pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	zio_nowait(zio);

	return (ZIO_PIPELINE_CONTINUE);
}
/*
 * ==========================================================================
 * Allocate and free blocks
 * ==========================================================================
 */
static int
zio_dva_allocate(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	metaslab_class_t *mc = spa->spa_normal_class;
	blkptr_t *bp = zio->io_bp;
	int error;

	if (zio->io_gang_leader == NULL) {
		ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
		zio->io_gang_leader = zio;
	}

	ASSERT(BP_IS_HOLE(bp));
	ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
	ASSERT3U(zio->io_prop.zp_ndvas, >, 0);
	ASSERT3U(zio->io_prop.zp_ndvas, <=, spa_max_replication(spa));
	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	error = metaslab_alloc(spa, mc, zio->io_size, bp,
	    zio->io_prop.zp_ndvas, zio->io_txg, NULL, 0);

	if (error) {
		if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE)
			return (zio_write_gang_block(zio));
		zio->io_error = error;
	}

	return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_dva_free(zio_t *zio)
{
	metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);

	return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_dva_claim(zio_t *zio)
{
	int error;

	error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
	if (error)
		zio->io_error = error;

	return (ZIO_PIPELINE_CONTINUE);
}
/*
 * Undo an allocation.  This is used by zio_done() when an I/O fails
 * and we want to give back the block we just allocated.
 * This handles both normal blocks and gang blocks.
 */
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
	spa_t *spa = zio->io_spa;
	boolean_t now = !(zio->io_flags & ZIO_FLAG_IO_REWRITE);

	ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));

	if (zio->io_bp == bp && !now) {
		/*
		 * This is a rewrite for sync-to-convergence.
		 * We can't do a metaslab_free(NOW) because bp wasn't allocated
		 * during this sync pass, which means that metaslab_sync()
		 * already committed the allocation.
		 */
		ASSERT(DVA_EQUAL(BP_IDENTITY(bp),
		    BP_IDENTITY(&zio->io_bp_orig)));
		ASSERT(spa_sync_pass(spa) > 1);

		if (BP_IS_GANG(bp) && gn == NULL) {
			/*
			 * This is a gang leader whose gang header(s) we
			 * couldn't read now, so defer the free until later.
			 * The block should still be intact because without
			 * the headers, we'd never even start the rewrite.
			 */
			bplist_enqueue_deferred(&spa->spa_sync_bplist, bp);
			return;
		}
	}

	if (!BP_IS_HOLE(bp))
		metaslab_free(spa, bp, bp->blk_birth, now);

	if (gn != NULL) {
		for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
			zio_dva_unallocate(zio, gn->gn_child[g],
			    &gn->gn_gbh->zg_blkptr[g]);
		}
	}
}
/*
 * Try to allocate an intent log block.  Return 0 on success, errno on
 * failure.
 */
int
zio_alloc_blk(spa_t *spa, uint64_t size, blkptr_t *new_bp, blkptr_t *old_bp,
    uint64_t txg)
{
	int error;

	error = metaslab_alloc(spa, spa->spa_log_class, size,
	    new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID);

	if (error)
		error = metaslab_alloc(spa, spa->spa_normal_class, size,
		    new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID);

	if (error == 0) {
		BP_SET_LSIZE(new_bp, size);
		BP_SET_PSIZE(new_bp, size);
		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
		BP_SET_CHECKSUM(new_bp, ZIO_CHECKSUM_ZILOG);
		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
		BP_SET_LEVEL(new_bp, 0);
		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
	}

	return (error);
}
/*
 * Free an intent log block.  We know it can't be a gang block, so there's
 * nothing to do except metaslab_free() it.
 */
void
zio_free_blk(spa_t *spa, blkptr_t *bp, uint64_t txg)
{
	ASSERT(!BP_IS_GANG(bp));

	metaslab_free(spa, bp, txg, B_FALSE);
}
/*
 * ==========================================================================
 * Read and write to physical devices
 * ==========================================================================
 */
static int
zio_vdev_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	uint64_t align;
	spa_t *spa = zio->io_spa;

	ASSERT(zio->io_error == 0);
	ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);

	if (vd == NULL) {
		if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
			spa_config_enter(spa, SCL_ZIO, zio, RW_READER);

		/*
		 * The mirror_ops handle multiple DVAs in a single BP.
		 */
		return (vdev_mirror_ops.vdev_op_io_start(zio));
	}

	align = 1ULL << vd->vdev_top->vdev_ashift;

	if (P2PHASE(zio->io_size, align) != 0) {
		uint64_t asize = P2ROUNDUP(zio->io_size, align);
		char *abuf = zio_buf_alloc(asize);
		ASSERT(vd == vd->vdev_top);
		if (zio->io_type == ZIO_TYPE_WRITE) {
			bcopy(zio->io_data, abuf, zio->io_size);
			bzero(abuf + zio->io_size, asize - zio->io_size);
		}
		zio_push_transform(zio, abuf, asize, asize, zio_subblock);
	}

	ASSERT(P2PHASE(zio->io_offset, align) == 0);
	ASSERT(P2PHASE(zio->io_size, align) == 0);
	ASSERT(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));

	/*
	 * If this is a repair I/O, and there's no self-healing involved --
	 * that is, we're just resilvering what we expect to resilver --
	 * then don't do the I/O unless zio's txg is actually in vd's DTL.
	 * This prevents spurious resilvering with nested replication.
	 * For example, given a mirror of mirrors, (A+B)+(C+D), if only
	 * A is out of date, we'll read from C+D, then use the data to
	 * resilver A+B -- but we don't actually want to resilver B, just A.
	 * The top-level mirror has no way to know this, so instead we just
	 * discard unnecessary repairs as we work our way down the vdev tree.
	 * The same logic applies to any form of nested replication:
	 * ditto + mirror, RAID-Z + replacing, etc.  This covers them all.
	 */
	if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
	    !(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
	    zio->io_txg != 0 &&	/* not a delegated i/o */
	    !vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		zio_vdev_io_bypass(zio);
		return (ZIO_PIPELINE_CONTINUE);
	}

	if (vd->vdev_ops->vdev_op_leaf &&
	    (zio->io_type == ZIO_TYPE_READ ||
	    zio->io_type == ZIO_TYPE_WRITE)) {

		if (zio->io_type == ZIO_TYPE_READ &&
		    vdev_cache_read(zio) == 0)
			return (ZIO_PIPELINE_CONTINUE);

		if ((zio = vdev_queue_io(zio)) == NULL)
			return (ZIO_PIPELINE_STOP);

		if (!vdev_accessible(vd, zio)) {
			zio->io_error = ENXIO;
			zio_interrupt(zio);
			return (ZIO_PIPELINE_STOP);
		}
	}

	return (vd->vdev_ops->vdev_op_io_start(zio));
}
static int
zio_vdev_io_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
	boolean_t unexpected_error = B_FALSE;

	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	ASSERT(zio->io_type == ZIO_TYPE_READ ||
	    zio->io_type == ZIO_TYPE_WRITE);

	if (vd != NULL && vd->vdev_ops->vdev_op_leaf) {

		vdev_queue_io_done(zio);

		if (zio->io_type == ZIO_TYPE_WRITE)
			vdev_cache_write(zio);

		if (zio_injection_enabled && zio->io_error == 0)
			zio->io_error = zio_handle_device_injection(vd,
			    EIO);

		if (zio_injection_enabled && zio->io_error == 0)
			zio->io_error = zio_handle_label_injection(zio, EIO);

		if (zio->io_error) {
			if (!vdev_accessible(vd, zio)) {
				zio->io_error = ENXIO;
			} else {
				unexpected_error = B_TRUE;
			}
		}
	}

	ops->vdev_op_io_done(zio);

	if (unexpected_error)
		VERIFY(vdev_probe(vd, zio) == NULL);

	return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_vdev_io_assess(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);

	if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
		spa_config_exit(zio->io_spa, SCL_ZIO, zio);

	if (zio->io_vsd != NULL) {
		zio->io_vsd_free(zio);
		zio->io_vsd = NULL;
	}

	if (zio_injection_enabled && zio->io_error == 0)
		zio->io_error = zio_handle_fault_injection(zio, EIO);

	/*
	 * If the I/O failed, determine whether we should attempt to retry it.
	 */
	if (zio->io_error && vd == NULL &&
	    !(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
		ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE));	/* not a leaf */
		ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS));	/* not a leaf */
		zio->io_error = 0;
		zio->io_flags |= ZIO_FLAG_IO_RETRY |
		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
		zio->io_stage = ZIO_STAGE_VDEV_IO_START - 1;
		zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE);
		return (ZIO_PIPELINE_STOP);
	}

	/*
	 * If we got an error on a leaf device, convert it to ENXIO
	 * if the device is not accessible at all.
	 */
	if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
	    !vdev_accessible(vd, zio))
		zio->io_error = ENXIO;

	/*
	 * If we can't write to an interior vdev (mirror or RAID-Z),
	 * set vdev_cant_write so that we stop trying to allocate from it.
	 */
	if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
	    vd != NULL && !vd->vdev_ops->vdev_op_leaf)
		vd->vdev_cant_write = B_TRUE;

	if (zio->io_error)
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	return (ZIO_PIPELINE_CONTINUE);
}
void
zio_vdev_io_reissue(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_stage--;
}
*zio
)
1949 ASSERT(zio
->io_stage
== ZIO_STAGE_VDEV_IO_DONE
);
1955 zio_vdev_io_bypass(zio_t
*zio
)
1957 ASSERT(zio
->io_stage
== ZIO_STAGE_VDEV_IO_START
);
1958 ASSERT(zio
->io_error
== 0);
1960 zio
->io_flags
|= ZIO_FLAG_IO_BYPASS
;
1961 zio
->io_stage
= ZIO_STAGE_VDEV_IO_ASSESS
- 1;
/*
 * ==========================================================================
 * Generate and verify checksums
 * ==========================================================================
 */
static int
zio_checksum_generate(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	enum zio_checksum checksum;

	if (bp == NULL) {
		/*
		 * This is zio_write_phys().
		 * We're either generating a label checksum, or none at all.
		 */
		checksum = zio->io_prop.zp_checksum;

		if (checksum == ZIO_CHECKSUM_OFF)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(checksum == ZIO_CHECKSUM_LABEL);
	} else {
		if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
			ASSERT(!IO_IS_ALLOCATING(zio));
			checksum = ZIO_CHECKSUM_GANG_HEADER;
		} else {
			checksum = BP_GET_CHECKSUM(bp);
		}
	}

	zio_checksum_compute(zio, checksum, zio->io_data, zio->io_size);

	return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_checksum_verify(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	int error;

	if (bp == NULL) {
		/*
		 * This is zio_read_phys().
		 * We're either verifying a label checksum, or nothing at all.
		 */
		if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
			return (ZIO_PIPELINE_CONTINUE);

		ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
	}

	if ((error = zio_checksum_error(zio)) != 0) {
		zio->io_error = error;
		if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
			zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM,
			    zio->io_spa, zio->io_vd, zio, 0, 0);
		}
	}

	return (ZIO_PIPELINE_CONTINUE);
}
/*
 * Called by RAID-Z to ensure we don't compute the checksum twice.
 */
void
zio_checksum_verified(zio_t *zio)
{
	zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY);
}
/*
 * ==========================================================================
 * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
 * An error of 0 indicates success.  ENXIO indicates whole-device failure,
 * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
 * indicate errors that are specific to one I/O, and most likely permanent.
 * Any other error is presumed to be worse because we weren't expecting it.
 * ==========================================================================
 */
int
zio_worst_error(int e1, int e2)
{
	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
	int r1, r2;

	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
		if (e1 == zio_error_rank[r1])
			break;

	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
		if (e2 == zio_error_rank[r2])
			break;

	return (r1 > r2 ? e1 : e2);
}
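
/*
 * Worked example, following directly from zio_error_rank:
 * zio_worst_error(ENXIO, ECKSUM) returns ECKSUM, since per-I/O errors
 * rank worse than whole-device failure, and zio_worst_error(EIO, EINVAL)
 * returns EINVAL, since an unexpected errno falls off the end of the
 * table and therefore ranks worst of all.
 */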
/*
 * ==========================================================================
 * I/O completion
 * ==========================================================================
 */
static int
zio_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	zio_t *pio, *pio_next;

	if (zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY))
		return (ZIO_PIPELINE_STOP);

	if (zio->io_ready) {
		ASSERT(IO_IS_ALLOCATING(zio));
		ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);

		zio->io_ready(zio);
	}

	if (bp != NULL && bp != &zio->io_bp_copy)
		zio->io_bp_copy = *bp;

	if (zio->io_error)
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_READY] = 1;
	pio = zio_walk_parents(zio);
	mutex_exit(&zio->io_lock);

	/*
	 * As we notify zio's parents, new parents could be added.
	 * New parents go to the head of zio's io_parent_list, however,
	 * so we will (correctly) not notify them.  The remainder of zio's
	 * io_parent_list, from 'pio_next' onward, cannot change because
	 * all parents must wait for us to be done before they can be done.
	 */
	for (; pio != NULL; pio = pio_next) {
		pio_next = zio_walk_parents(zio);
		zio_notify_parent(pio, zio, ZIO_WAIT_READY);
	}

	return (ZIO_PIPELINE_CONTINUE);
}
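
/*
 * The notification loop above tolerates concurrent adoption: a parent
 * added while we iterate is prepended to io_parent_list, so the snapshot
 * taken under io_lock looks like
 *
 *	head -> new parent (not notified here) -> pio -> ... -> tail
 *
 * and the new parent instead observes io_state[ZIO_WAIT_READY] == 1 when
 * it links in (that check lives in the child-linking code, not shown
 * here), so no READY notification is lost.
 */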
static int
zio_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_t *lio = zio->io_logical;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;
	uint64_t psize = zio->io_size;
	zio_t *pio, *pio_next;

	/*
	 * If our children haven't all completed,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			ASSERT(zio->io_children[c][w] == 0);

	if (bp != NULL) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bp->blk_pad[2] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
		    (bp == zio_unique_parent(zio)->io_bp));
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT3U(zio->io_prop.zp_ndvas, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
	}
	/*
	 * If there were child vdev or gang errors, they apply to us now.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);

	zio_pop_transforms(zio);	/* note: may set zio->io_error */

	vdev_stat_update(zio, psize);
	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level.  We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO ||
		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) && zio == lio) {
			/*
			 * For logical I/O requests, tell the SPA to log the
			 * error and generate a logical data ereport.
			 */
			spa_log_error(spa, zio);
			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
			    0, 0);
		}
	}
	if (zio->io_error && zio == lio) {
		/*
		 * Determine whether zio should be reexecuted.  This will
		 * propagate all the way to the root via zio_notify_parent().
		 */
		ASSERT(vd == NULL && bp != NULL);

		if (IO_IS_ALLOCATING(zio))
			if (zio->io_error != ENOSPC)
				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
			else
				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if ((zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_FREE) &&
		    zio->io_error == ENXIO &&
		    spa->spa_load_state == SPA_LOAD_NONE &&
		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
	}
	/*
	 * If there were logical child errors, they apply to us now.
	 * We defer this until now to avoid conflating logical child
	 * errors with errors that happened to the zio itself when
	 * updating vdev stats and reporting FMA events above.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);

	if ((zio->io_error || zio->io_reexecute) && IO_IS_ALLOCATING(zio) &&
	    zio->io_child_type == ZIO_CHILD_LOGICAL) {
		ASSERT(zio->io_child_type != ZIO_CHILD_GANG);
		zio_dva_unallocate(zio, zio->io_gang_tree, bp);
	}

	zio_gang_tree_free(&zio->io_gang_tree);
	/*
	 * Godfather I/Os should never suspend.
	 */
	if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
	    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
		zio->io_reexecute = 0;
	if (zio->io_reexecute) {
		/*
		 * This is a logical I/O that wants to reexecute.
		 *
		 * Reexecute is top-down.  When an i/o fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same.  This percolates all the way up to the root.
		 * The root i/o will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original i/o dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		zio->io_gang_leader = NULL;

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		/*
		 * "The Godfather" I/O monitors its children but is
		 * not a true parent to them.  It will track them through
		 * the pipeline but severs its ties whenever they get into
		 * trouble (e.g. suspended).  This allows "The Godfather"
		 * I/O to return status without blocking.
		 */
		for (pio = zio_walk_parents(zio); pio != NULL;
		    pio = pio_next) {
			zio_link_t *zl = zio->io_walk_link;
			pio_next = zio_walk_parents(zio);

			if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
			    (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
				zio_remove_child(pio, zio, zl);
				zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
			}
		}

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root i/o, so there's nothing to do
			 * but notify our parent.  Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(spa, zio);
		} else {
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
			(void) taskq_dispatch(
			    spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE],
			    (task_func_t *)zio_reexecute, zio, TQ_SLEEP);
		}

		return (ZIO_PIPELINE_STOP);
	}
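
/*
 * Sketch of the godfather contract enforced above (the real call sites
 * are elsewhere in the SPA; the names here are illustrative only):
 *
 *	zio_t *gio = zio_root(spa, NULL, NULL, ZIO_FLAG_GODFATHER);
 *	zio_nowait(zio_read(gio, ...));		-- child may later suspend
 *	(void) zio_wait(gio);			-- still returns: ties severed
 *
 * Because a suspended child removes itself from its godfather and
 * notifies it as DONE, the godfather can report status without blocking
 * on I/O that may never complete.
 */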
	ASSERT(zio_walk_children(zio) == NULL);
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) {
		zio_link_t *zl = zio->io_walk_link;
		pio_next = zio_walk_parents(zio);
		zio_remove_child(pio, zio, zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (ZIO_PIPELINE_STOP);
}
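
/*
 * Note on the waiter handoff above: a synchronous caller uses zio_wait(),
 * which sets io_waiter and sleeps on io_cv until io_executor is cleared;
 * that caller then reaps io_error and destroys the zio itself.
 * Asynchronous callers use zio_nowait(), leave io_waiter NULL, and the
 * zio is destroyed here instead.
 */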
/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
static zio_pipe_stage_t *zio_pipeline[ZIO_STAGES] = {
	NULL,
	zio_issue_async,
	zio_read_bp_init,
	zio_write_bp_init,
	zio_checksum_generate,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_done
};
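
/*
 * How the table is consumed (zio_execute() appears earlier in this file;
 * this is an outline, not a verbatim copy): the execute loop advances
 * io_stage past any stage masked out of io_pipeline, then indexes
 * zio_pipeline[]:
 *
 *	while (zio->io_stage < ZIO_STAGE_DONE) {
 *		zio_stage_t stage = zio->io_stage + 1;
 *		while (((1U << stage) & zio->io_pipeline) == 0)
 *			stage++;
 *		zio->io_stage = stage;
 *		if (zio_pipeline[stage](zio) == ZIO_PIPELINE_STOP)
 *			return;
 *	}
 *
 * This is why stages rewind by setting io_stage to "target - 1" and
 * returning ZIO_PIPELINE_STOP: the next execution advances back onto the
 * target stage.
 */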