4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #include <sys/zfs_context.h>
27 #include <sys/fm/fs/zfs.h>
30 #include <sys/spa_impl.h>
31 #include <sys/vdev_impl.h>
32 #include <sys/zio_impl.h>
33 #include <sys/zio_compress.h>
34 #include <sys/zio_checksum.h>
37 * ==========================================================================
39 * ==========================================================================
41 uint8_t zio_priority_table
[ZIO_PRIORITY_TABLE_SIZE
] = {
42 0, /* ZIO_PRIORITY_NOW */
43 0, /* ZIO_PRIORITY_SYNC_READ */
44 0, /* ZIO_PRIORITY_SYNC_WRITE */
45 6, /* ZIO_PRIORITY_ASYNC_READ */
46 4, /* ZIO_PRIORITY_ASYNC_WRITE */
47 4, /* ZIO_PRIORITY_FREE */
48 0, /* ZIO_PRIORITY_CACHE_FILL */
49 0, /* ZIO_PRIORITY_LOG_WRITE */
50 10, /* ZIO_PRIORITY_RESILVER */
51 20, /* ZIO_PRIORITY_SCRUB */
55 * ==========================================================================
56 * I/O type descriptions
57 * ==========================================================================
59 char *zio_type_name
[ZIO_TYPES
] = {
60 "null", "read", "write", "free", "claim", "ioctl" };
62 #define SYNC_PASS_DEFERRED_FREE 1 /* defer frees after this pass */
63 #define SYNC_PASS_DONT_COMPRESS 4 /* don't compress after this pass */
64 #define SYNC_PASS_REWRITE 1 /* rewrite new bps after this pass */
67 * ==========================================================================
69 * ==========================================================================
71 kmem_cache_t
*zio_cache
;
72 kmem_cache_t
*zio_link_cache
;
73 kmem_cache_t
*zio_buf_cache
[SPA_MAXBLOCKSIZE
>> SPA_MINBLOCKSHIFT
];
74 kmem_cache_t
*zio_data_buf_cache
[SPA_MAXBLOCKSIZE
>> SPA_MINBLOCKSHIFT
];
77 extern vmem_t
*zio_alloc_arena
;
81 * An allocating zio is one that either currently has the DVA allocate
82 * stage set or will have it later in its lifetime.
84 #define IO_IS_ALLOCATING(zio) \
85 ((zio)->io_orig_pipeline & (1U << ZIO_STAGE_DVA_ALLOCATE))
91 vmem_t
*data_alloc_arena
= NULL
;
94 data_alloc_arena
= zio_alloc_arena
;
96 zio_cache
= kmem_cache_create("zio_cache",
97 sizeof (zio_t
), 0, NULL
, NULL
, NULL
, NULL
, NULL
, 0);
98 zio_link_cache
= kmem_cache_create("zio_link_cache",
99 sizeof (zio_link_t
), 0, NULL
, NULL
, NULL
, NULL
, NULL
, 0);
102 * For small buffers, we want a cache for each multiple of
103 * SPA_MINBLOCKSIZE. For medium-size buffers, we want a cache
104 * for each quarter-power of 2. For large buffers, we want
105 * a cache for each multiple of PAGESIZE.
107 for (c
= 0; c
< SPA_MAXBLOCKSIZE
>> SPA_MINBLOCKSHIFT
; c
++) {
108 size_t size
= (c
+ 1) << SPA_MINBLOCKSHIFT
;
112 while (p2
& (p2
- 1))
115 if (size
<= 4 * SPA_MINBLOCKSIZE
) {
116 align
= SPA_MINBLOCKSIZE
;
117 } else if (P2PHASE(size
, PAGESIZE
) == 0) {
119 } else if (P2PHASE(size
, p2
>> 2) == 0) {
125 (void) sprintf(name
, "zio_buf_%lu", (ulong_t
)size
);
126 zio_buf_cache
[c
] = kmem_cache_create(name
, size
,
127 align
, NULL
, NULL
, NULL
, NULL
, NULL
, KMC_NODEBUG
);
129 (void) sprintf(name
, "zio_data_buf_%lu", (ulong_t
)size
);
130 zio_data_buf_cache
[c
] = kmem_cache_create(name
, size
,
131 align
, NULL
, NULL
, NULL
, NULL
, data_alloc_arena
,
137 ASSERT(zio_buf_cache
[c
] != NULL
);
138 if (zio_buf_cache
[c
- 1] == NULL
)
139 zio_buf_cache
[c
- 1] = zio_buf_cache
[c
];
141 ASSERT(zio_data_buf_cache
[c
] != NULL
);
142 if (zio_data_buf_cache
[c
- 1] == NULL
)
143 zio_data_buf_cache
[c
- 1] = zio_data_buf_cache
[c
];
153 kmem_cache_t
*last_cache
= NULL
;
154 kmem_cache_t
*last_data_cache
= NULL
;
156 for (c
= 0; c
< SPA_MAXBLOCKSIZE
>> SPA_MINBLOCKSHIFT
; c
++) {
157 if (zio_buf_cache
[c
] != last_cache
) {
158 last_cache
= zio_buf_cache
[c
];
159 kmem_cache_destroy(zio_buf_cache
[c
]);
161 zio_buf_cache
[c
] = NULL
;
163 if (zio_data_buf_cache
[c
] != last_data_cache
) {
164 last_data_cache
= zio_data_buf_cache
[c
];
165 kmem_cache_destroy(zio_data_buf_cache
[c
]);
167 zio_data_buf_cache
[c
] = NULL
;
170 kmem_cache_destroy(zio_link_cache
);
171 kmem_cache_destroy(zio_cache
);
177 * ==========================================================================
178 * Allocate and free I/O buffers
179 * ==========================================================================
183 * Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
184 * crashdump if the kernel panics, so use it judiciously. Obviously, it's
185 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
186 * excess / transient data in-core during a crashdump.
189 zio_buf_alloc(size_t size
)
191 size_t c
= (size
- 1) >> SPA_MINBLOCKSHIFT
;
193 ASSERT(c
< SPA_MAXBLOCKSIZE
>> SPA_MINBLOCKSHIFT
);
195 return (kmem_cache_alloc(zio_buf_cache
[c
], KM_PUSHPAGE
));
199 * Use zio_data_buf_alloc to allocate data. The data will not appear in a
200 * crashdump if the kernel panics. This exists so that we will limit the amount
201 * of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount
202 * of kernel heap dumped to disk when the kernel panics)
205 zio_data_buf_alloc(size_t size
)
207 size_t c
= (size
- 1) >> SPA_MINBLOCKSHIFT
;
209 ASSERT(c
< SPA_MAXBLOCKSIZE
>> SPA_MINBLOCKSHIFT
);
211 return (kmem_cache_alloc(zio_data_buf_cache
[c
], KM_PUSHPAGE
));
215 zio_buf_free(void *buf
, size_t size
)
217 size_t c
= (size
- 1) >> SPA_MINBLOCKSHIFT
;
219 ASSERT(c
< SPA_MAXBLOCKSIZE
>> SPA_MINBLOCKSHIFT
);
221 kmem_cache_free(zio_buf_cache
[c
], buf
);
225 zio_data_buf_free(void *buf
, size_t size
)
227 size_t c
= (size
- 1) >> SPA_MINBLOCKSHIFT
;
229 ASSERT(c
< SPA_MAXBLOCKSIZE
>> SPA_MINBLOCKSHIFT
);
231 kmem_cache_free(zio_data_buf_cache
[c
], buf
);
235 * ==========================================================================
236 * Push and pop I/O transform buffers
237 * ==========================================================================
240 zio_push_transform(zio_t
*zio
, void *data
, uint64_t size
, uint64_t bufsize
,
241 zio_transform_func_t
*transform
)
243 zio_transform_t
*zt
= kmem_alloc(sizeof (zio_transform_t
), KM_SLEEP
);
245 zt
->zt_orig_data
= zio
->io_data
;
246 zt
->zt_orig_size
= zio
->io_size
;
247 zt
->zt_bufsize
= bufsize
;
248 zt
->zt_transform
= transform
;
250 zt
->zt_next
= zio
->io_transform_stack
;
251 zio
->io_transform_stack
= zt
;
258 zio_pop_transforms(zio_t
*zio
)
262 while ((zt
= zio
->io_transform_stack
) != NULL
) {
263 if (zt
->zt_transform
!= NULL
)
264 zt
->zt_transform(zio
,
265 zt
->zt_orig_data
, zt
->zt_orig_size
);
267 zio_buf_free(zio
->io_data
, zt
->zt_bufsize
);
269 zio
->io_data
= zt
->zt_orig_data
;
270 zio
->io_size
= zt
->zt_orig_size
;
271 zio
->io_transform_stack
= zt
->zt_next
;
273 kmem_free(zt
, sizeof (zio_transform_t
));
278 * ==========================================================================
279 * I/O transform callbacks for subblocks and decompression
280 * ==========================================================================
283 zio_subblock(zio_t
*zio
, void *data
, uint64_t size
)
285 ASSERT(zio
->io_size
> size
);
287 if (zio
->io_type
== ZIO_TYPE_READ
)
288 bcopy(zio
->io_data
, data
, size
);
292 zio_decompress(zio_t
*zio
, void *data
, uint64_t size
)
294 if (zio
->io_error
== 0 &&
295 zio_decompress_data(BP_GET_COMPRESS(zio
->io_bp
),
296 zio
->io_data
, zio
->io_size
, data
, size
) != 0)
301 * ==========================================================================
302 * I/O parent/child relationships and pipeline interlocks
303 * ==========================================================================
306 * NOTE - Callers to zio_walk_parents() and zio_walk_children must
307 * continue calling these functions until they return NULL.
308 * Otherwise, the next caller will pick up the list walk in
309 * some indeterminate state. (Otherwise every caller would
310 * have to pass in a cookie to keep the state represented by
311 * io_walk_link, which gets annoying.)
314 zio_walk_parents(zio_t
*cio
)
316 zio_link_t
*zl
= cio
->io_walk_link
;
317 list_t
*pl
= &cio
->io_parent_list
;
319 zl
= (zl
== NULL
) ? list_head(pl
) : list_next(pl
, zl
);
320 cio
->io_walk_link
= zl
;
325 ASSERT(zl
->zl_child
== cio
);
326 return (zl
->zl_parent
);
330 zio_walk_children(zio_t
*pio
)
332 zio_link_t
*zl
= pio
->io_walk_link
;
333 list_t
*cl
= &pio
->io_child_list
;
335 zl
= (zl
== NULL
) ? list_head(cl
) : list_next(cl
, zl
);
336 pio
->io_walk_link
= zl
;
341 ASSERT(zl
->zl_parent
== pio
);
342 return (zl
->zl_child
);
346 zio_unique_parent(zio_t
*cio
)
348 zio_t
*pio
= zio_walk_parents(cio
);
350 VERIFY(zio_walk_parents(cio
) == NULL
);
355 zio_add_child(zio_t
*pio
, zio_t
*cio
)
357 zio_link_t
*zl
= kmem_cache_alloc(zio_link_cache
, KM_SLEEP
);
360 * Logical I/Os can have logical, gang, or vdev children.
361 * Gang I/Os can have gang or vdev children.
362 * Vdev I/Os can only have vdev children.
363 * The following ASSERT captures all of these constraints.
365 ASSERT(cio
->io_child_type
<= pio
->io_child_type
);
370 mutex_enter(&cio
->io_lock
);
371 mutex_enter(&pio
->io_lock
);
373 ASSERT(pio
->io_state
[ZIO_WAIT_DONE
] == 0);
375 for (int w
= 0; w
< ZIO_WAIT_TYPES
; w
++)
376 pio
->io_children
[cio
->io_child_type
][w
] += !cio
->io_state
[w
];
378 list_insert_head(&pio
->io_child_list
, zl
);
379 list_insert_head(&cio
->io_parent_list
, zl
);
381 mutex_exit(&pio
->io_lock
);
382 mutex_exit(&cio
->io_lock
);
386 zio_remove_child(zio_t
*pio
, zio_t
*cio
, zio_link_t
*zl
)
388 ASSERT(zl
->zl_parent
== pio
);
389 ASSERT(zl
->zl_child
== cio
);
391 mutex_enter(&cio
->io_lock
);
392 mutex_enter(&pio
->io_lock
);
394 list_remove(&pio
->io_child_list
, zl
);
395 list_remove(&cio
->io_parent_list
, zl
);
397 mutex_exit(&pio
->io_lock
);
398 mutex_exit(&cio
->io_lock
);
400 kmem_cache_free(zio_link_cache
, zl
);
404 zio_wait_for_children(zio_t
*zio
, enum zio_child child
, enum zio_wait_type wait
)
406 uint64_t *countp
= &zio
->io_children
[child
][wait
];
407 boolean_t waiting
= B_FALSE
;
409 mutex_enter(&zio
->io_lock
);
410 ASSERT(zio
->io_stall
== NULL
);
413 zio
->io_stall
= countp
;
416 mutex_exit(&zio
->io_lock
);
422 zio_notify_parent(zio_t
*pio
, zio_t
*zio
, enum zio_wait_type wait
)
424 uint64_t *countp
= &pio
->io_children
[zio
->io_child_type
][wait
];
425 int *errorp
= &pio
->io_child_error
[zio
->io_child_type
];
427 mutex_enter(&pio
->io_lock
);
428 if (zio
->io_error
&& !(zio
->io_flags
& ZIO_FLAG_DONT_PROPAGATE
))
429 *errorp
= zio_worst_error(*errorp
, zio
->io_error
);
430 pio
->io_reexecute
|= zio
->io_reexecute
;
431 ASSERT3U(*countp
, >, 0);
432 if (--*countp
== 0 && pio
->io_stall
== countp
) {
433 pio
->io_stall
= NULL
;
434 mutex_exit(&pio
->io_lock
);
437 mutex_exit(&pio
->io_lock
);
442 zio_inherit_child_errors(zio_t
*zio
, enum zio_child c
)
444 if (zio
->io_child_error
[c
] != 0 && zio
->io_error
== 0)
445 zio
->io_error
= zio
->io_child_error
[c
];
449 * ==========================================================================
450 * Create the various types of I/O (read, write, free, etc)
451 * ==========================================================================
454 zio_create(zio_t
*pio
, spa_t
*spa
, uint64_t txg
, blkptr_t
*bp
,
455 void *data
, uint64_t size
, zio_done_func_t
*done
, void *private,
456 zio_type_t type
, int priority
, int flags
, vdev_t
*vd
, uint64_t offset
,
457 const zbookmark_t
*zb
, uint8_t stage
, uint32_t pipeline
)
461 ASSERT3U(size
, <=, SPA_MAXBLOCKSIZE
);
462 ASSERT(P2PHASE(size
, SPA_MINBLOCKSIZE
) == 0);
463 ASSERT(P2PHASE(offset
, SPA_MINBLOCKSIZE
) == 0);
465 ASSERT(!vd
|| spa_config_held(spa
, SCL_STATE_ALL
, RW_READER
));
466 ASSERT(!bp
|| !(flags
& ZIO_FLAG_CONFIG_WRITER
));
467 ASSERT(vd
|| stage
== ZIO_STAGE_OPEN
);
469 zio
= kmem_cache_alloc(zio_cache
, KM_SLEEP
);
470 bzero(zio
, sizeof (zio_t
));
472 mutex_init(&zio
->io_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
473 cv_init(&zio
->io_cv
, NULL
, CV_DEFAULT
, NULL
);
475 list_create(&zio
->io_parent_list
, sizeof (zio_link_t
),
476 offsetof(zio_link_t
, zl_parent_node
));
477 list_create(&zio
->io_child_list
, sizeof (zio_link_t
),
478 offsetof(zio_link_t
, zl_child_node
));
481 zio
->io_child_type
= ZIO_CHILD_VDEV
;
482 else if (flags
& ZIO_FLAG_GANG_CHILD
)
483 zio
->io_child_type
= ZIO_CHILD_GANG
;
485 zio
->io_child_type
= ZIO_CHILD_LOGICAL
;
489 zio
->io_bp_copy
= *bp
;
490 zio
->io_bp_orig
= *bp
;
491 if (type
!= ZIO_TYPE_WRITE
)
492 zio
->io_bp
= &zio
->io_bp_copy
; /* so caller can free */
493 if (zio
->io_child_type
== ZIO_CHILD_LOGICAL
) {
495 pipeline
|= ZIO_GANG_STAGES
;
496 zio
->io_logical
= zio
;
505 zio
->io_private
= private;
507 zio
->io_priority
= priority
;
509 zio
->io_offset
= offset
;
510 zio
->io_orig_flags
= zio
->io_flags
= flags
;
511 zio
->io_orig_stage
= zio
->io_stage
= stage
;
512 zio
->io_orig_pipeline
= zio
->io_pipeline
= pipeline
;
514 zio
->io_state
[ZIO_WAIT_READY
] = (stage
>= ZIO_STAGE_READY
);
515 zio
->io_state
[ZIO_WAIT_DONE
] = (stage
>= ZIO_STAGE_DONE
);
518 zio
->io_bookmark
= *zb
;
521 if (zio
->io_logical
== NULL
)
522 zio
->io_logical
= pio
->io_logical
;
523 zio_add_child(pio
, zio
);
530 zio_destroy(zio_t
*zio
)
532 spa_t
*spa
= zio
->io_spa
;
533 uint8_t async_root
= zio
->io_async_root
;
535 list_destroy(&zio
->io_parent_list
);
536 list_destroy(&zio
->io_child_list
);
537 mutex_destroy(&zio
->io_lock
);
538 cv_destroy(&zio
->io_cv
);
539 kmem_cache_free(zio_cache
, zio
);
542 mutex_enter(&spa
->spa_async_root_lock
);
543 if (--spa
->spa_async_root_count
== 0)
544 cv_broadcast(&spa
->spa_async_root_cv
);
545 mutex_exit(&spa
->spa_async_root_lock
);
550 zio_null(zio_t
*pio
, spa_t
*spa
, vdev_t
*vd
, zio_done_func_t
*done
,
551 void *private, int flags
)
555 zio
= zio_create(pio
, spa
, 0, NULL
, NULL
, 0, done
, private,
556 ZIO_TYPE_NULL
, ZIO_PRIORITY_NOW
, flags
, vd
, 0, NULL
,
557 ZIO_STAGE_OPEN
, ZIO_INTERLOCK_PIPELINE
);
563 zio_root(spa_t
*spa
, zio_done_func_t
*done
, void *private, int flags
)
565 return (zio_null(NULL
, spa
, NULL
, done
, private, flags
));
569 zio_read(zio_t
*pio
, spa_t
*spa
, const blkptr_t
*bp
,
570 void *data
, uint64_t size
, zio_done_func_t
*done
, void *private,
571 int priority
, int flags
, const zbookmark_t
*zb
)
575 zio
= zio_create(pio
, spa
, bp
->blk_birth
, (blkptr_t
*)bp
,
576 data
, size
, done
, private,
577 ZIO_TYPE_READ
, priority
, flags
, NULL
, 0, zb
,
578 ZIO_STAGE_OPEN
, ZIO_READ_PIPELINE
);
584 zio_skip_write(zio_t
*zio
)
586 ASSERT(zio
->io_type
== ZIO_TYPE_WRITE
);
587 ASSERT(zio
->io_stage
== ZIO_STAGE_READY
);
588 ASSERT(!BP_IS_GANG(zio
->io_bp
));
590 zio
->io_pipeline
&= ~ZIO_VDEV_IO_STAGES
;
594 zio_write(zio_t
*pio
, spa_t
*spa
, uint64_t txg
, blkptr_t
*bp
,
595 void *data
, uint64_t size
, zio_prop_t
*zp
,
596 zio_done_func_t
*ready
, zio_done_func_t
*done
, void *private,
597 int priority
, int flags
, const zbookmark_t
*zb
)
601 ASSERT(zp
->zp_checksum
>= ZIO_CHECKSUM_OFF
&&
602 zp
->zp_checksum
< ZIO_CHECKSUM_FUNCTIONS
&&
603 zp
->zp_compress
>= ZIO_COMPRESS_OFF
&&
604 zp
->zp_compress
< ZIO_COMPRESS_FUNCTIONS
&&
605 zp
->zp_type
< DMU_OT_NUMTYPES
&&
608 zp
->zp_ndvas
<= spa_max_replication(spa
));
609 ASSERT(ready
!= NULL
);
611 zio
= zio_create(pio
, spa
, txg
, bp
, data
, size
, done
, private,
612 ZIO_TYPE_WRITE
, priority
, flags
, NULL
, 0, zb
,
613 ZIO_STAGE_OPEN
, ZIO_WRITE_PIPELINE
);
615 zio
->io_ready
= ready
;
622 zio_rewrite(zio_t
*pio
, spa_t
*spa
, uint64_t txg
, blkptr_t
*bp
, void *data
,
623 uint64_t size
, zio_done_func_t
*done
, void *private, int priority
,
624 int flags
, zbookmark_t
*zb
)
628 zio
= zio_create(pio
, spa
, txg
, bp
, data
, size
, done
, private,
629 ZIO_TYPE_WRITE
, priority
, flags
, NULL
, 0, zb
,
630 ZIO_STAGE_OPEN
, ZIO_REWRITE_PIPELINE
);
636 zio_free(zio_t
*pio
, spa_t
*spa
, uint64_t txg
, blkptr_t
*bp
,
637 zio_done_func_t
*done
, void *private, int flags
)
641 ASSERT(!BP_IS_HOLE(bp
));
643 if (bp
->blk_fill
== BLK_FILL_ALREADY_FREED
)
644 return (zio_null(pio
, spa
, NULL
, NULL
, NULL
, flags
));
646 if (txg
== spa
->spa_syncing_txg
&&
647 spa_sync_pass(spa
) > SYNC_PASS_DEFERRED_FREE
) {
648 bplist_enqueue_deferred(&spa
->spa_sync_bplist
, bp
);
649 return (zio_null(pio
, spa
, NULL
, NULL
, NULL
, flags
));
652 zio
= zio_create(pio
, spa
, txg
, bp
, NULL
, BP_GET_PSIZE(bp
),
653 done
, private, ZIO_TYPE_FREE
, ZIO_PRIORITY_FREE
, flags
,
654 NULL
, 0, NULL
, ZIO_STAGE_OPEN
, ZIO_FREE_PIPELINE
);
660 zio_claim(zio_t
*pio
, spa_t
*spa
, uint64_t txg
, blkptr_t
*bp
,
661 zio_done_func_t
*done
, void *private, int flags
)
666 * A claim is an allocation of a specific block. Claims are needed
667 * to support immediate writes in the intent log. The issue is that
668 * immediate writes contain committed data, but in a txg that was
669 * *not* committed. Upon opening the pool after an unclean shutdown,
670 * the intent log claims all blocks that contain immediate write data
671 * so that the SPA knows they're in use.
673 * All claims *must* be resolved in the first txg -- before the SPA
674 * starts allocating blocks -- so that nothing is allocated twice.
676 ASSERT3U(spa
->spa_uberblock
.ub_rootbp
.blk_birth
, <, spa_first_txg(spa
));
677 ASSERT3U(spa_first_txg(spa
), <=, txg
);
679 zio
= zio_create(pio
, spa
, txg
, bp
, NULL
, BP_GET_PSIZE(bp
),
680 done
, private, ZIO_TYPE_CLAIM
, ZIO_PRIORITY_NOW
, flags
,
681 NULL
, 0, NULL
, ZIO_STAGE_OPEN
, ZIO_CLAIM_PIPELINE
);
687 zio_ioctl(zio_t
*pio
, spa_t
*spa
, vdev_t
*vd
, int cmd
,
688 zio_done_func_t
*done
, void *private, int priority
, int flags
)
693 if (vd
->vdev_children
== 0) {
694 zio
= zio_create(pio
, spa
, 0, NULL
, NULL
, 0, done
, private,
695 ZIO_TYPE_IOCTL
, priority
, flags
, vd
, 0, NULL
,
696 ZIO_STAGE_OPEN
, ZIO_IOCTL_PIPELINE
);
700 zio
= zio_null(pio
, spa
, NULL
, NULL
, NULL
, flags
);
702 for (c
= 0; c
< vd
->vdev_children
; c
++)
703 zio_nowait(zio_ioctl(zio
, spa
, vd
->vdev_child
[c
], cmd
,
704 done
, private, priority
, flags
));
711 zio_read_phys(zio_t
*pio
, vdev_t
*vd
, uint64_t offset
, uint64_t size
,
712 void *data
, int checksum
, zio_done_func_t
*done
, void *private,
713 int priority
, int flags
, boolean_t labels
)
717 ASSERT(vd
->vdev_children
== 0);
718 ASSERT(!labels
|| offset
+ size
<= VDEV_LABEL_START_SIZE
||
719 offset
>= vd
->vdev_psize
- VDEV_LABEL_END_SIZE
);
720 ASSERT3U(offset
+ size
, <=, vd
->vdev_psize
);
722 zio
= zio_create(pio
, vd
->vdev_spa
, 0, NULL
, data
, size
, done
, private,
723 ZIO_TYPE_READ
, priority
, flags
, vd
, offset
, NULL
,
724 ZIO_STAGE_OPEN
, ZIO_READ_PHYS_PIPELINE
);
726 zio
->io_prop
.zp_checksum
= checksum
;
732 zio_write_phys(zio_t
*pio
, vdev_t
*vd
, uint64_t offset
, uint64_t size
,
733 void *data
, int checksum
, zio_done_func_t
*done
, void *private,
734 int priority
, int flags
, boolean_t labels
)
738 ASSERT(vd
->vdev_children
== 0);
739 ASSERT(!labels
|| offset
+ size
<= VDEV_LABEL_START_SIZE
||
740 offset
>= vd
->vdev_psize
- VDEV_LABEL_END_SIZE
);
741 ASSERT3U(offset
+ size
, <=, vd
->vdev_psize
);
743 zio
= zio_create(pio
, vd
->vdev_spa
, 0, NULL
, data
, size
, done
, private,
744 ZIO_TYPE_WRITE
, priority
, flags
, vd
, offset
, NULL
,
745 ZIO_STAGE_OPEN
, ZIO_WRITE_PHYS_PIPELINE
);
747 zio
->io_prop
.zp_checksum
= checksum
;
749 if (zio_checksum_table
[checksum
].ci_zbt
) {
751 * zbt checksums are necessarily destructive -- they modify
752 * the end of the write buffer to hold the verifier/checksum.
753 * Therefore, we must make a local copy in case the data is
754 * being written to multiple places in parallel.
756 void *wbuf
= zio_buf_alloc(size
);
757 bcopy(data
, wbuf
, size
);
758 zio_push_transform(zio
, wbuf
, size
, size
, NULL
);
765 * Create a child I/O to do some work for us.
768 zio_vdev_child_io(zio_t
*pio
, blkptr_t
*bp
, vdev_t
*vd
, uint64_t offset
,
769 void *data
, uint64_t size
, int type
, int priority
, int flags
,
770 zio_done_func_t
*done
, void *private)
772 uint32_t pipeline
= ZIO_VDEV_CHILD_PIPELINE
;
775 ASSERT(vd
->vdev_parent
==
776 (pio
->io_vd
? pio
->io_vd
: pio
->io_spa
->spa_root_vdev
));
778 if (type
== ZIO_TYPE_READ
&& bp
!= NULL
) {
780 * If we have the bp, then the child should perform the
781 * checksum and the parent need not. This pushes error
782 * detection as close to the leaves as possible and
783 * eliminates redundant checksums in the interior nodes.
785 pipeline
|= 1U << ZIO_STAGE_CHECKSUM_VERIFY
;
786 pio
->io_pipeline
&= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY
);
789 if (vd
->vdev_children
== 0)
790 offset
+= VDEV_LABEL_START_SIZE
;
792 zio
= zio_create(pio
, pio
->io_spa
, pio
->io_txg
, bp
, data
, size
,
793 done
, private, type
, priority
,
794 (pio
->io_flags
& ZIO_FLAG_VDEV_INHERIT
) |
795 ZIO_FLAG_CANFAIL
| ZIO_FLAG_DONT_PROPAGATE
| flags
,
796 vd
, offset
, &pio
->io_bookmark
,
797 ZIO_STAGE_VDEV_IO_START
- 1, pipeline
);
803 zio_vdev_delegated_io(vdev_t
*vd
, uint64_t offset
, void *data
, uint64_t size
,
804 int type
, int priority
, int flags
, zio_done_func_t
*done
, void *private)
808 ASSERT(vd
->vdev_ops
->vdev_op_leaf
);
810 zio
= zio_create(NULL
, vd
->vdev_spa
, 0, NULL
,
811 data
, size
, done
, private, type
, priority
,
812 flags
| ZIO_FLAG_CANFAIL
| ZIO_FLAG_DONT_RETRY
,
814 ZIO_STAGE_VDEV_IO_START
- 1, ZIO_VDEV_CHILD_PIPELINE
);
820 zio_flush(zio_t
*zio
, vdev_t
*vd
)
822 zio_nowait(zio_ioctl(zio
, zio
->io_spa
, vd
, DKIOCFLUSHWRITECACHE
,
823 NULL
, NULL
, ZIO_PRIORITY_NOW
,
824 ZIO_FLAG_CANFAIL
| ZIO_FLAG_DONT_PROPAGATE
| ZIO_FLAG_DONT_RETRY
));
828 * ==========================================================================
829 * Prepare to read and write logical blocks
830 * ==========================================================================
834 zio_read_bp_init(zio_t
*zio
)
836 blkptr_t
*bp
= zio
->io_bp
;
838 if (BP_GET_COMPRESS(bp
) != ZIO_COMPRESS_OFF
&&
839 zio
->io_logical
== zio
&& !(zio
->io_flags
& ZIO_FLAG_RAW
)) {
840 uint64_t csize
= BP_GET_PSIZE(bp
);
841 void *cbuf
= zio_buf_alloc(csize
);
843 zio_push_transform(zio
, cbuf
, csize
, csize
, zio_decompress
);
846 if (!dmu_ot
[BP_GET_TYPE(bp
)].ot_metadata
&& BP_GET_LEVEL(bp
) == 0)
847 zio
->io_flags
|= ZIO_FLAG_DONT_CACHE
;
849 return (ZIO_PIPELINE_CONTINUE
);
853 zio_write_bp_init(zio_t
*zio
)
855 zio_prop_t
*zp
= &zio
->io_prop
;
856 int compress
= zp
->zp_compress
;
857 blkptr_t
*bp
= zio
->io_bp
;
859 uint64_t lsize
= zio
->io_size
;
860 uint64_t csize
= lsize
;
861 uint64_t cbufsize
= 0;
865 * If our children haven't all reached the ready stage,
866 * wait for them and then repeat this pipeline stage.
868 if (zio_wait_for_children(zio
, ZIO_CHILD_GANG
, ZIO_WAIT_READY
) ||
869 zio_wait_for_children(zio
, ZIO_CHILD_LOGICAL
, ZIO_WAIT_READY
))
870 return (ZIO_PIPELINE_STOP
);
872 if (!IO_IS_ALLOCATING(zio
))
873 return (ZIO_PIPELINE_CONTINUE
);
875 ASSERT(compress
!= ZIO_COMPRESS_INHERIT
);
877 if (bp
->blk_birth
== zio
->io_txg
) {
879 * We're rewriting an existing block, which means we're
880 * working on behalf of spa_sync(). For spa_sync() to
881 * converge, it must eventually be the case that we don't
882 * have to allocate new blocks. But compression changes
883 * the blocksize, which forces a reallocate, and makes
884 * convergence take longer. Therefore, after the first
885 * few passes, stop compressing to ensure convergence.
887 pass
= spa_sync_pass(zio
->io_spa
);
890 if (pass
> SYNC_PASS_DONT_COMPRESS
)
891 compress
= ZIO_COMPRESS_OFF
;
894 * Only MOS (objset 0) data should need to be rewritten.
896 ASSERT(zio
->io_logical
->io_bookmark
.zb_objset
== 0);
898 /* Make sure someone doesn't change their mind on overwrites */
899 ASSERT(MIN(zp
->zp_ndvas
+ BP_IS_GANG(bp
),
900 spa_max_replication(zio
->io_spa
)) == BP_GET_NDVAS(bp
));
903 if (compress
!= ZIO_COMPRESS_OFF
) {
904 if (!zio_compress_data(compress
, zio
->io_data
, zio
->io_size
,
905 &cbuf
, &csize
, &cbufsize
)) {
906 compress
= ZIO_COMPRESS_OFF
;
907 } else if (csize
!= 0) {
908 zio_push_transform(zio
, cbuf
, csize
, cbufsize
, NULL
);
913 * The final pass of spa_sync() must be all rewrites, but the first
914 * few passes offer a trade-off: allocating blocks defers convergence,
915 * but newly allocated blocks are sequential, so they can be written
916 * to disk faster. Therefore, we allow the first few passes of
917 * spa_sync() to allocate new blocks, but force rewrites after that.
918 * There should only be a handful of blocks after pass 1 in any case.
920 if (bp
->blk_birth
== zio
->io_txg
&& BP_GET_PSIZE(bp
) == csize
&&
921 pass
> SYNC_PASS_REWRITE
) {
923 uint32_t gang_stages
= zio
->io_pipeline
& ZIO_GANG_STAGES
;
924 zio
->io_pipeline
= ZIO_REWRITE_PIPELINE
| gang_stages
;
925 zio
->io_flags
|= ZIO_FLAG_IO_REWRITE
;
928 zio
->io_pipeline
= ZIO_WRITE_PIPELINE
;
932 zio
->io_pipeline
= ZIO_INTERLOCK_PIPELINE
;
934 ASSERT(zp
->zp_checksum
!= ZIO_CHECKSUM_GANG_HEADER
);
935 BP_SET_LSIZE(bp
, lsize
);
936 BP_SET_PSIZE(bp
, csize
);
937 BP_SET_COMPRESS(bp
, compress
);
938 BP_SET_CHECKSUM(bp
, zp
->zp_checksum
);
939 BP_SET_TYPE(bp
, zp
->zp_type
);
940 BP_SET_LEVEL(bp
, zp
->zp_level
);
941 BP_SET_BYTEORDER(bp
, ZFS_HOST_BYTEORDER
);
944 return (ZIO_PIPELINE_CONTINUE
);
948 * ==========================================================================
949 * Execute the I/O pipeline
950 * ==========================================================================
954 zio_taskq_dispatch(zio_t
*zio
, enum zio_taskq_type q
)
956 zio_type_t t
= zio
->io_type
;
959 * If we're a config writer, the normal issue and interrupt threads
960 * may all be blocked waiting for the config lock. In this case,
961 * select the otherwise-unused taskq for ZIO_TYPE_NULL.
963 if (zio
->io_flags
& ZIO_FLAG_CONFIG_WRITER
)
967 * A similar issue exists for the L2ARC write thread until L2ARC 2.0.
969 if (t
== ZIO_TYPE_WRITE
&& zio
->io_vd
&& zio
->io_vd
->vdev_aux
)
972 (void) taskq_dispatch(zio
->io_spa
->spa_zio_taskq
[t
][q
],
973 (task_func_t
*)zio_execute
, zio
, TQ_SLEEP
);
977 zio_taskq_member(zio_t
*zio
, enum zio_taskq_type q
)
979 kthread_t
*executor
= zio
->io_executor
;
980 spa_t
*spa
= zio
->io_spa
;
982 for (zio_type_t t
= 0; t
< ZIO_TYPES
; t
++)
983 if (taskq_member(spa
->spa_zio_taskq
[t
][q
], executor
))
990 zio_issue_async(zio_t
*zio
)
992 zio_taskq_dispatch(zio
, ZIO_TASKQ_ISSUE
);
994 return (ZIO_PIPELINE_STOP
);
998 zio_interrupt(zio_t
*zio
)
1000 zio_taskq_dispatch(zio
, ZIO_TASKQ_INTERRUPT
);
1004 * Execute the I/O pipeline until one of the following occurs:
1005 * (1) the I/O completes; (2) the pipeline stalls waiting for
1006 * dependent child I/Os; (3) the I/O issues, so we're waiting
1007 * for an I/O completion interrupt; (4) the I/O is delegated by
1008 * vdev-level caching or aggregation; (5) the I/O is deferred
1009 * due to vdev-level queueing; (6) the I/O is handed off to
1010 * another thread. In all cases, the pipeline stops whenever
1011 * there's no CPU work; it never burns a thread in cv_wait().
1013 * There's no locking on io_stage because there's no legitimate way
1014 * for multiple threads to be attempting to process the same I/O.
1016 static zio_pipe_stage_t
*zio_pipeline
[ZIO_STAGES
];
1019 zio_execute(zio_t
*zio
)
1021 zio
->io_executor
= curthread
;
1023 while (zio
->io_stage
< ZIO_STAGE_DONE
) {
1024 uint32_t pipeline
= zio
->io_pipeline
;
1025 zio_stage_t stage
= zio
->io_stage
;
1028 ASSERT(!MUTEX_HELD(&zio
->io_lock
));
1030 while (((1U << ++stage
) & pipeline
) == 0)
1033 ASSERT(stage
<= ZIO_STAGE_DONE
);
1034 ASSERT(zio
->io_stall
== NULL
);
1037 * If we are in interrupt context and this pipeline stage
1038 * will grab a config lock that is held across I/O,
1039 * issue async to avoid deadlock.
1041 if (((1U << stage
) & ZIO_CONFIG_LOCK_BLOCKING_STAGES
) &&
1042 zio
->io_vd
== NULL
&&
1043 zio_taskq_member(zio
, ZIO_TASKQ_INTERRUPT
)) {
1044 zio_taskq_dispatch(zio
, ZIO_TASKQ_ISSUE
);
1048 zio
->io_stage
= stage
;
1049 rv
= zio_pipeline
[stage
](zio
);
1051 if (rv
== ZIO_PIPELINE_STOP
)
1054 ASSERT(rv
== ZIO_PIPELINE_CONTINUE
);
1059 * ==========================================================================
1060 * Initiate I/O, either sync or async
1061 * ==========================================================================
1064 zio_wait(zio_t
*zio
)
1068 ASSERT(zio
->io_stage
== ZIO_STAGE_OPEN
);
1069 ASSERT(zio
->io_executor
== NULL
);
1071 zio
->io_waiter
= curthread
;
1075 mutex_enter(&zio
->io_lock
);
1076 while (zio
->io_executor
!= NULL
)
1077 cv_wait(&zio
->io_cv
, &zio
->io_lock
);
1078 mutex_exit(&zio
->io_lock
);
1080 error
= zio
->io_error
;
1087 zio_nowait(zio_t
*zio
)
1089 ASSERT(zio
->io_executor
== NULL
);
1091 if (zio
->io_child_type
== ZIO_CHILD_LOGICAL
&&
1092 zio_unique_parent(zio
) == NULL
) {
1094 * This is a logical async I/O with no parent to wait for it.
1095 * Track how many outstanding I/Os of this type exist so
1096 * that spa_unload() knows when they are all done.
1098 spa_t
*spa
= zio
->io_spa
;
1099 zio
->io_async_root
= B_TRUE
;
1100 mutex_enter(&spa
->spa_async_root_lock
);
1101 spa
->spa_async_root_count
++;
1102 mutex_exit(&spa
->spa_async_root_lock
);
1109 * ==========================================================================
1110 * Reexecute or suspend/resume failed I/O
1111 * ==========================================================================
1115 zio_reexecute(zio_t
*pio
)
1117 zio_t
*cio
, *cio_next
;
1119 ASSERT(pio
->io_child_type
== ZIO_CHILD_LOGICAL
);
1120 ASSERT(pio
->io_orig_stage
== ZIO_STAGE_OPEN
);
1122 pio
->io_flags
= pio
->io_orig_flags
;
1123 pio
->io_stage
= pio
->io_orig_stage
;
1124 pio
->io_pipeline
= pio
->io_orig_pipeline
;
1125 pio
->io_reexecute
= 0;
1127 for (int w
= 0; w
< ZIO_WAIT_TYPES
; w
++)
1128 pio
->io_state
[w
] = 0;
1129 for (int c
= 0; c
< ZIO_CHILD_TYPES
; c
++)
1130 pio
->io_child_error
[c
] = 0;
1132 if (IO_IS_ALLOCATING(pio
)) {
1134 * Remember the failed bp so that the io_ready() callback
1135 * can update its accounting upon reexecution. The block
1136 * was already freed in zio_done(); we indicate this with
1137 * a fill count of -1 so that zio_free() knows to skip it.
1139 blkptr_t
*bp
= pio
->io_bp
;
1140 ASSERT(bp
->blk_birth
== 0 || bp
->blk_birth
== pio
->io_txg
);
1141 bp
->blk_fill
= BLK_FILL_ALREADY_FREED
;
1142 pio
->io_bp_orig
= *bp
;
1147 * As we reexecute pio's children, new children could be created.
1148 * New children go to the head of pio's io_child_list, however,
1149 * so we will (correctly) not reexecute them. The key is that
1150 * the remainder of pio's io_child_list, from 'cio_next' onward,
1151 * cannot be affected by any side effects of reexecuting 'cio'.
1153 for (cio
= zio_walk_children(pio
); cio
!= NULL
; cio
= cio_next
) {
1154 cio_next
= zio_walk_children(pio
);
1155 mutex_enter(&pio
->io_lock
);
1156 for (int w
= 0; w
< ZIO_WAIT_TYPES
; w
++)
1157 pio
->io_children
[cio
->io_child_type
][w
]++;
1158 mutex_exit(&pio
->io_lock
);
1163 * Now that all children have been reexecuted, execute the parent.
1169 zio_suspend(spa_t
*spa
, zio_t
*zio
)
1171 if (spa_get_failmode(spa
) == ZIO_FAILURE_MODE_PANIC
)
1172 fm_panic("Pool '%s' has encountered an uncorrectable I/O "
1173 "failure and the failure mode property for this pool "
1174 "is set to panic.", spa_name(spa
));
1176 zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE
, spa
, NULL
, NULL
, 0, 0);
1178 mutex_enter(&spa
->spa_suspend_lock
);
1180 if (spa
->spa_suspend_zio_root
== NULL
)
1181 spa
->spa_suspend_zio_root
= zio_root(spa
, NULL
, NULL
, 0);
1183 spa
->spa_suspended
= B_TRUE
;
1186 ASSERT(zio
!= spa
->spa_suspend_zio_root
);
1187 ASSERT(zio
->io_child_type
== ZIO_CHILD_LOGICAL
);
1188 ASSERT(zio_unique_parent(zio
) == NULL
);
1189 ASSERT(zio
->io_stage
== ZIO_STAGE_DONE
);
1190 zio_add_child(spa
->spa_suspend_zio_root
, zio
);
1193 mutex_exit(&spa
->spa_suspend_lock
);
1197 zio_resume(spa_t
*spa
)
1199 zio_t
*pio
, *cio
, *cio_next
;
1202 * Reexecute all previously suspended i/o.
1204 mutex_enter(&spa
->spa_suspend_lock
);
1205 spa
->spa_suspended
= B_FALSE
;
1206 cv_broadcast(&spa
->spa_suspend_cv
);
1207 pio
= spa
->spa_suspend_zio_root
;
1208 spa
->spa_suspend_zio_root
= NULL
;
1209 mutex_exit(&spa
->spa_suspend_lock
);
1214 for (cio
= zio_walk_children(pio
); cio
!= NULL
; cio
= cio_next
) {
1215 zio_link_t
*zl
= pio
->io_walk_link
;
1216 cio_next
= zio_walk_children(pio
);
1217 zio_remove_child(pio
, cio
, zl
);
1221 ASSERT(pio
->io_children
[ZIO_CHILD_LOGICAL
][ZIO_WAIT_DONE
] == 0);
1223 (void) zio_wait(pio
);
1227 zio_resume_wait(spa_t
*spa
)
1229 mutex_enter(&spa
->spa_suspend_lock
);
1230 while (spa_suspended(spa
))
1231 cv_wait(&spa
->spa_suspend_cv
, &spa
->spa_suspend_lock
);
1232 mutex_exit(&spa
->spa_suspend_lock
);
1236 * ==========================================================================
1239 * A gang block is a collection of small blocks that looks to the DMU
1240 * like one large block. When zio_dva_allocate() cannot find a block
1241 * of the requested size, due to either severe fragmentation or the pool
1242 * being nearly full, it calls zio_write_gang_block() to construct the
1243 * block from smaller fragments.
1245 * A gang block consists of a gang header (zio_gbh_phys_t) and up to
1246 * three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
1247 * an indirect block: it's an array of block pointers. It consumes
1248 * only one sector and hence is allocatable regardless of fragmentation.
1249 * The gang header's bps point to its gang members, which hold the data.
1251 * Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
1252 * as the verifier to ensure uniqueness of the SHA256 checksum.
1253 * Critically, the gang block bp's blk_cksum is the checksum of the data,
1254 * not the gang header. This ensures that data block signatures (needed for
1255 * deduplication) are independent of how the block is physically stored.
1257 * Gang blocks can be nested: a gang member may itself be a gang block.
1258 * Thus every gang block is a tree in which root and all interior nodes are
1259 * gang headers, and the leaves are normal blocks that contain user data.
1260 * The root of the gang tree is called the gang leader.
1262 * To perform any operation (read, rewrite, free, claim) on a gang block,
1263 * zio_gang_assemble() first assembles the gang tree (minus data leaves)
1264 * in the io_gang_tree field of the original logical i/o by recursively
1265 * reading the gang leader and all gang headers below it. This yields
1266 * an in-core tree containing the contents of every gang header and the
1267 * bps for every constituent of the gang block.
1269 * With the gang tree now assembled, zio_gang_issue() just walks the gang tree
1270 * and invokes a callback on each bp. To free a gang block, zio_gang_issue()
1271 * calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
1272 * zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
1273 * zio_read_gang() is a wrapper around zio_read() that omits reading gang
1274 * headers, since we already have those in io_gang_tree. zio_rewrite_gang()
1275 * performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
1276 * of the gang header plus zio_checksum_compute() of the data to update the
1277 * gang header's blk_cksum as described above.
1279 * The two-phase assemble/issue model solves the problem of partial failure --
1280 * what if you'd freed part of a gang block but then couldn't read the
1281 * gang header for another part? Assembling the entire gang tree first
1282 * ensures that all the necessary gang header I/O has succeeded before
1283 * starting the actual work of free, claim, or write. Once the gang tree
1284 * is assembled, free and claim are in-memory operations that cannot fail.
1286 * In the event that a gang write fails, zio_dva_unallocate() walks the
1287 * gang tree to immediately free (i.e. insert back into the space map)
1288 * everything we've allocated. This ensures that we don't get ENOSPC
1289 * errors during repeated suspend/resume cycles due to a flaky device.
1291 * Gang rewrites only happen during sync-to-convergence. If we can't assemble
1292 * the gang tree, we won't modify the block, so we can safely defer the free
1293 * (knowing that the block is still intact). If we *can* assemble the gang
1294 * tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
1295 * each constituent bp and we can allocate a new block on the next sync pass.
1297 * In all cases, the gang tree allows complete recovery from partial failure.
1298 * ==========================================================================
1302 zio_read_gang(zio_t
*pio
, blkptr_t
*bp
, zio_gang_node_t
*gn
, void *data
)
1307 return (zio_read(pio
, pio
->io_spa
, bp
, data
, BP_GET_PSIZE(bp
),
1308 NULL
, NULL
, pio
->io_priority
, ZIO_GANG_CHILD_FLAGS(pio
),
1309 &pio
->io_bookmark
));
1313 zio_rewrite_gang(zio_t
*pio
, blkptr_t
*bp
, zio_gang_node_t
*gn
, void *data
)
1318 zio
= zio_rewrite(pio
, pio
->io_spa
, pio
->io_txg
, bp
,
1319 gn
->gn_gbh
, SPA_GANGBLOCKSIZE
, NULL
, NULL
, pio
->io_priority
,
1320 ZIO_GANG_CHILD_FLAGS(pio
), &pio
->io_bookmark
);
1322 * As we rewrite each gang header, the pipeline will compute
1323 * a new gang block header checksum for it; but no one will
1324 * compute a new data checksum, so we do that here. The one
1325 * exception is the gang leader: the pipeline already computed
1326 * its data checksum because that stage precedes gang assembly.
1327 * (Presently, nothing actually uses interior data checksums;
1328 * this is just good hygiene.)
1330 if (gn
!= pio
->io_logical
->io_gang_tree
) {
1331 zio_checksum_compute(zio
, BP_GET_CHECKSUM(bp
),
1332 data
, BP_GET_PSIZE(bp
));
1335 zio
= zio_rewrite(pio
, pio
->io_spa
, pio
->io_txg
, bp
,
1336 data
, BP_GET_PSIZE(bp
), NULL
, NULL
, pio
->io_priority
,
1337 ZIO_GANG_CHILD_FLAGS(pio
), &pio
->io_bookmark
);
1345 zio_free_gang(zio_t
*pio
, blkptr_t
*bp
, zio_gang_node_t
*gn
, void *data
)
1347 return (zio_free(pio
, pio
->io_spa
, pio
->io_txg
, bp
,
1348 NULL
, NULL
, ZIO_GANG_CHILD_FLAGS(pio
)));
1353 zio_claim_gang(zio_t
*pio
, blkptr_t
*bp
, zio_gang_node_t
*gn
, void *data
)
1355 return (zio_claim(pio
, pio
->io_spa
, pio
->io_txg
, bp
,
1356 NULL
, NULL
, ZIO_GANG_CHILD_FLAGS(pio
)));
1359 static zio_gang_issue_func_t
*zio_gang_issue_func
[ZIO_TYPES
] = {
1368 static void zio_gang_tree_assemble_done(zio_t
*zio
);
1370 static zio_gang_node_t
*
1371 zio_gang_node_alloc(zio_gang_node_t
**gnpp
)
1373 zio_gang_node_t
*gn
;
1375 ASSERT(*gnpp
== NULL
);
1377 gn
= kmem_zalloc(sizeof (*gn
), KM_SLEEP
);
1378 gn
->gn_gbh
= zio_buf_alloc(SPA_GANGBLOCKSIZE
);
1385 zio_gang_node_free(zio_gang_node_t
**gnpp
)
1387 zio_gang_node_t
*gn
= *gnpp
;
1389 for (int g
= 0; g
< SPA_GBH_NBLKPTRS
; g
++)
1390 ASSERT(gn
->gn_child
[g
] == NULL
);
1392 zio_buf_free(gn
->gn_gbh
, SPA_GANGBLOCKSIZE
);
1393 kmem_free(gn
, sizeof (*gn
));
1398 zio_gang_tree_free(zio_gang_node_t
**gnpp
)
1400 zio_gang_node_t
*gn
= *gnpp
;
1405 for (int g
= 0; g
< SPA_GBH_NBLKPTRS
; g
++)
1406 zio_gang_tree_free(&gn
->gn_child
[g
]);
1408 zio_gang_node_free(gnpp
);
1412 zio_gang_tree_assemble(zio_t
*lio
, blkptr_t
*bp
, zio_gang_node_t
**gnpp
)
1414 zio_gang_node_t
*gn
= zio_gang_node_alloc(gnpp
);
1416 ASSERT(lio
->io_logical
== lio
);
1417 ASSERT(BP_IS_GANG(bp
));
1419 zio_nowait(zio_read(lio
, lio
->io_spa
, bp
, gn
->gn_gbh
,
1420 SPA_GANGBLOCKSIZE
, zio_gang_tree_assemble_done
, gn
,
1421 lio
->io_priority
, ZIO_GANG_CHILD_FLAGS(lio
), &lio
->io_bookmark
));
1425 zio_gang_tree_assemble_done(zio_t
*zio
)
1427 zio_t
*lio
= zio
->io_logical
;
1428 zio_gang_node_t
*gn
= zio
->io_private
;
1429 blkptr_t
*bp
= zio
->io_bp
;
1430 zio_t
*pio
= zio_unique_parent(zio
);
1433 ASSERT(zio_walk_children(zio
) == NULL
);
1438 if (BP_SHOULD_BYTESWAP(bp
))
1439 byteswap_uint64_array(zio
->io_data
, zio
->io_size
);
1441 ASSERT(zio
->io_data
== gn
->gn_gbh
);
1442 ASSERT(zio
->io_size
== SPA_GANGBLOCKSIZE
);
1443 ASSERT(gn
->gn_gbh
->zg_tail
.zbt_magic
== ZBT_MAGIC
);
1445 for (int g
= 0; g
< SPA_GBH_NBLKPTRS
; g
++) {
1446 blkptr_t
*gbp
= &gn
->gn_gbh
->zg_blkptr
[g
];
1447 if (!BP_IS_GANG(gbp
))
1449 zio_gang_tree_assemble(lio
, gbp
, &gn
->gn_child
[g
]);
1454 zio_gang_tree_issue(zio_t
*pio
, zio_gang_node_t
*gn
, blkptr_t
*bp
, void *data
)
1456 zio_t
*lio
= pio
->io_logical
;
1459 ASSERT(BP_IS_GANG(bp
) == !!gn
);
1460 ASSERT(BP_GET_CHECKSUM(bp
) == BP_GET_CHECKSUM(lio
->io_bp
));
1461 ASSERT(BP_GET_LSIZE(bp
) == BP_GET_PSIZE(bp
) || gn
== lio
->io_gang_tree
);
1464 * If you're a gang header, your data is in gn->gn_gbh.
1465 * If you're a gang member, your data is in 'data' and gn == NULL.
1467 zio
= zio_gang_issue_func
[lio
->io_type
](pio
, bp
, gn
, data
);
1470 ASSERT(gn
->gn_gbh
->zg_tail
.zbt_magic
== ZBT_MAGIC
);
1472 for (int g
= 0; g
< SPA_GBH_NBLKPTRS
; g
++) {
1473 blkptr_t
*gbp
= &gn
->gn_gbh
->zg_blkptr
[g
];
1474 if (BP_IS_HOLE(gbp
))
1476 zio_gang_tree_issue(zio
, gn
->gn_child
[g
], gbp
, data
);
1477 data
= (char *)data
+ BP_GET_PSIZE(gbp
);
1481 if (gn
== lio
->io_gang_tree
)
1482 ASSERT3P((char *)lio
->io_data
+ lio
->io_size
, ==, data
);
1489 zio_gang_assemble(zio_t
*zio
)
1491 blkptr_t
*bp
= zio
->io_bp
;
1493 ASSERT(BP_IS_GANG(bp
) && zio
== zio
->io_logical
);
1495 zio_gang_tree_assemble(zio
, bp
, &zio
->io_gang_tree
);
1497 return (ZIO_PIPELINE_CONTINUE
);
1501 zio_gang_issue(zio_t
*zio
)
1503 zio_t
*lio
= zio
->io_logical
;
1504 blkptr_t
*bp
= zio
->io_bp
;
1506 if (zio_wait_for_children(zio
, ZIO_CHILD_GANG
, ZIO_WAIT_DONE
))
1507 return (ZIO_PIPELINE_STOP
);
1509 ASSERT(BP_IS_GANG(bp
) && zio
== lio
);
1511 if (zio
->io_child_error
[ZIO_CHILD_GANG
] == 0)
1512 zio_gang_tree_issue(lio
, lio
->io_gang_tree
, bp
, lio
->io_data
);
1514 zio_gang_tree_free(&lio
->io_gang_tree
);
1516 zio
->io_pipeline
= ZIO_INTERLOCK_PIPELINE
;
1518 return (ZIO_PIPELINE_CONTINUE
);
1522 zio_write_gang_member_ready(zio_t
*zio
)
1524 zio_t
*pio
= zio_unique_parent(zio
);
1525 zio_t
*lio
= zio
->io_logical
;
1526 dva_t
*cdva
= zio
->io_bp
->blk_dva
;
1527 dva_t
*pdva
= pio
->io_bp
->blk_dva
;
1530 if (BP_IS_HOLE(zio
->io_bp
))
1533 ASSERT(BP_IS_HOLE(&zio
->io_bp_orig
));
1535 ASSERT(zio
->io_child_type
== ZIO_CHILD_GANG
);
1536 ASSERT3U(zio
->io_prop
.zp_ndvas
, ==, lio
->io_prop
.zp_ndvas
);
1537 ASSERT3U(zio
->io_prop
.zp_ndvas
, <=, BP_GET_NDVAS(zio
->io_bp
));
1538 ASSERT3U(pio
->io_prop
.zp_ndvas
, <=, BP_GET_NDVAS(pio
->io_bp
));
1539 ASSERT3U(BP_GET_NDVAS(zio
->io_bp
), <=, BP_GET_NDVAS(pio
->io_bp
));
1541 mutex_enter(&pio
->io_lock
);
1542 for (int d
= 0; d
< BP_GET_NDVAS(zio
->io_bp
); d
++) {
1543 ASSERT(DVA_GET_GANG(&pdva
[d
]));
1544 asize
= DVA_GET_ASIZE(&pdva
[d
]);
1545 asize
+= DVA_GET_ASIZE(&cdva
[d
]);
1546 DVA_SET_ASIZE(&pdva
[d
], asize
);
1548 mutex_exit(&pio
->io_lock
);
1552 zio_write_gang_block(zio_t
*pio
)
1554 spa_t
*spa
= pio
->io_spa
;
1555 blkptr_t
*bp
= pio
->io_bp
;
1556 zio_t
*lio
= pio
->io_logical
;
1558 zio_gang_node_t
*gn
, **gnpp
;
1559 zio_gbh_phys_t
*gbh
;
1560 uint64_t txg
= pio
->io_txg
;
1561 uint64_t resid
= pio
->io_size
;
1563 int ndvas
= lio
->io_prop
.zp_ndvas
;
1564 int gbh_ndvas
= MIN(ndvas
+ 1, spa_max_replication(spa
));
1568 error
= metaslab_alloc(spa
, spa
->spa_normal_class
, SPA_GANGBLOCKSIZE
,
1569 bp
, gbh_ndvas
, txg
, pio
== lio
? NULL
: lio
->io_bp
,
1570 METASLAB_HINTBP_FAVOR
| METASLAB_GANG_HEADER
);
1572 pio
->io_error
= error
;
1573 return (ZIO_PIPELINE_CONTINUE
);
1577 gnpp
= &lio
->io_gang_tree
;
1579 gnpp
= pio
->io_private
;
1580 ASSERT(pio
->io_ready
== zio_write_gang_member_ready
);
1583 gn
= zio_gang_node_alloc(gnpp
);
1585 bzero(gbh
, SPA_GANGBLOCKSIZE
);
1588 * Create the gang header.
1590 zio
= zio_rewrite(pio
, spa
, txg
, bp
, gbh
, SPA_GANGBLOCKSIZE
, NULL
, NULL
,
1591 pio
->io_priority
, ZIO_GANG_CHILD_FLAGS(pio
), &pio
->io_bookmark
);
1594 * Create and nowait the gang children.
1596 for (int g
= 0; resid
!= 0; resid
-= lsize
, g
++) {
1597 lsize
= P2ROUNDUP(resid
/ (SPA_GBH_NBLKPTRS
- g
),
1599 ASSERT(lsize
>= SPA_MINBLOCKSIZE
&& lsize
<= resid
);
1601 zp
.zp_checksum
= lio
->io_prop
.zp_checksum
;
1602 zp
.zp_compress
= ZIO_COMPRESS_OFF
;
1603 zp
.zp_type
= DMU_OT_NONE
;
1605 zp
.zp_ndvas
= lio
->io_prop
.zp_ndvas
;
1607 zio_nowait(zio_write(zio
, spa
, txg
, &gbh
->zg_blkptr
[g
],
1608 (char *)pio
->io_data
+ (pio
->io_size
- resid
), lsize
, &zp
,
1609 zio_write_gang_member_ready
, NULL
, &gn
->gn_child
[g
],
1610 pio
->io_priority
, ZIO_GANG_CHILD_FLAGS(pio
),
1611 &pio
->io_bookmark
));
1615 * Set pio's pipeline to just wait for zio to finish.
1617 pio
->io_pipeline
= ZIO_INTERLOCK_PIPELINE
;
1621 return (ZIO_PIPELINE_CONTINUE
);
1625 * ==========================================================================
1626 * Allocate and free blocks
1627 * ==========================================================================
1631 zio_dva_allocate(zio_t
*zio
)
1633 spa_t
*spa
= zio
->io_spa
;
1634 metaslab_class_t
*mc
= spa
->spa_normal_class
;
1635 blkptr_t
*bp
= zio
->io_bp
;
1638 ASSERT(BP_IS_HOLE(bp
));
1639 ASSERT3U(BP_GET_NDVAS(bp
), ==, 0);
1640 ASSERT3U(zio
->io_prop
.zp_ndvas
, >, 0);
1641 ASSERT3U(zio
->io_prop
.zp_ndvas
, <=, spa_max_replication(spa
));
1642 ASSERT3U(zio
->io_size
, ==, BP_GET_PSIZE(bp
));
1644 error
= metaslab_alloc(spa
, mc
, zio
->io_size
, bp
,
1645 zio
->io_prop
.zp_ndvas
, zio
->io_txg
, NULL
, 0);
1648 if (error
== ENOSPC
&& zio
->io_size
> SPA_MINBLOCKSIZE
)
1649 return (zio_write_gang_block(zio
));
1650 zio
->io_error
= error
;
1653 return (ZIO_PIPELINE_CONTINUE
);
1657 zio_dva_free(zio_t
*zio
)
1659 metaslab_free(zio
->io_spa
, zio
->io_bp
, zio
->io_txg
, B_FALSE
);
1661 return (ZIO_PIPELINE_CONTINUE
);
1665 zio_dva_claim(zio_t
*zio
)
1669 error
= metaslab_claim(zio
->io_spa
, zio
->io_bp
, zio
->io_txg
);
1671 zio
->io_error
= error
;
1673 return (ZIO_PIPELINE_CONTINUE
);
1677 * Undo an allocation. This is used by zio_done() when an I/O fails
1678 * and we want to give back the block we just allocated.
1679 * This handles both normal blocks and gang blocks.
1682 zio_dva_unallocate(zio_t
*zio
, zio_gang_node_t
*gn
, blkptr_t
*bp
)
1684 spa_t
*spa
= zio
->io_spa
;
1685 boolean_t now
= !(zio
->io_flags
& ZIO_FLAG_IO_REWRITE
);
1687 ASSERT(bp
->blk_birth
== zio
->io_txg
|| BP_IS_HOLE(bp
));
1689 if (zio
->io_bp
== bp
&& !now
) {
1691 * This is a rewrite for sync-to-convergence.
1692 * We can't do a metaslab_free(NOW) because bp wasn't allocated
1693 * during this sync pass, which means that metaslab_sync()
1694 * already committed the allocation.
1696 ASSERT(DVA_EQUAL(BP_IDENTITY(bp
),
1697 BP_IDENTITY(&zio
->io_bp_orig
)));
1698 ASSERT(spa_sync_pass(spa
) > 1);
1700 if (BP_IS_GANG(bp
) && gn
== NULL
) {
1702 * This is a gang leader whose gang header(s) we
1703 * couldn't read now, so defer the free until later.
1704 * The block should still be intact because without
1705 * the headers, we'd never even start the rewrite.
1707 bplist_enqueue_deferred(&spa
->spa_sync_bplist
, bp
);
1712 if (!BP_IS_HOLE(bp
))
1713 metaslab_free(spa
, bp
, bp
->blk_birth
, now
);
1716 for (int g
= 0; g
< SPA_GBH_NBLKPTRS
; g
++) {
1717 zio_dva_unallocate(zio
, gn
->gn_child
[g
],
1718 &gn
->gn_gbh
->zg_blkptr
[g
]);
1724 * Try to allocate an intent log block. Return 0 on success, errno on failure.
1727 zio_alloc_blk(spa_t
*spa
, uint64_t size
, blkptr_t
*new_bp
, blkptr_t
*old_bp
,
1732 error
= metaslab_alloc(spa
, spa
->spa_log_class
, size
,
1733 new_bp
, 1, txg
, old_bp
, METASLAB_HINTBP_AVOID
);
1736 error
= metaslab_alloc(spa
, spa
->spa_normal_class
, size
,
1737 new_bp
, 1, txg
, old_bp
, METASLAB_HINTBP_AVOID
);
1740 BP_SET_LSIZE(new_bp
, size
);
1741 BP_SET_PSIZE(new_bp
, size
);
1742 BP_SET_COMPRESS(new_bp
, ZIO_COMPRESS_OFF
);
1743 BP_SET_CHECKSUM(new_bp
, ZIO_CHECKSUM_ZILOG
);
1744 BP_SET_TYPE(new_bp
, DMU_OT_INTENT_LOG
);
1745 BP_SET_LEVEL(new_bp
, 0);
1746 BP_SET_BYTEORDER(new_bp
, ZFS_HOST_BYTEORDER
);
1753 * Free an intent log block. We know it can't be a gang block, so there's
1754 * nothing to do except metaslab_free() it.
1757 zio_free_blk(spa_t
*spa
, blkptr_t
*bp
, uint64_t txg
)
1759 ASSERT(!BP_IS_GANG(bp
));
1761 metaslab_free(spa
, bp
, txg
, B_FALSE
);
1765 * ==========================================================================
1766 * Read and write to physical devices
1767 * ==========================================================================
1770 zio_vdev_io_start(zio_t
*zio
)
1772 vdev_t
*vd
= zio
->io_vd
;
1774 spa_t
*spa
= zio
->io_spa
;
1776 ASSERT(zio
->io_error
== 0);
1777 ASSERT(zio
->io_child_error
[ZIO_CHILD_VDEV
] == 0);
1780 if (!(zio
->io_flags
& ZIO_FLAG_CONFIG_WRITER
))
1781 spa_config_enter(spa
, SCL_ZIO
, zio
, RW_READER
);
1784 * The mirror_ops handle multiple DVAs in a single BP.
1786 return (vdev_mirror_ops
.vdev_op_io_start(zio
));
1789 align
= 1ULL << vd
->vdev_top
->vdev_ashift
;
1791 if (P2PHASE(zio
->io_size
, align
) != 0) {
1792 uint64_t asize
= P2ROUNDUP(zio
->io_size
, align
);
1793 char *abuf
= zio_buf_alloc(asize
);
1794 ASSERT(vd
== vd
->vdev_top
);
1795 if (zio
->io_type
== ZIO_TYPE_WRITE
) {
1796 bcopy(zio
->io_data
, abuf
, zio
->io_size
);
1797 bzero(abuf
+ zio
->io_size
, asize
- zio
->io_size
);
1799 zio_push_transform(zio
, abuf
, asize
, asize
, zio_subblock
);
1802 ASSERT(P2PHASE(zio
->io_offset
, align
) == 0);
1803 ASSERT(P2PHASE(zio
->io_size
, align
) == 0);
1804 ASSERT(zio
->io_type
!= ZIO_TYPE_WRITE
|| spa_writeable(spa
));
1807 * If this is a repair I/O, and there's no self-healing involved --
1808 * that is, we're just resilvering what we expect to resilver --
1809 * then don't do the I/O unless zio's txg is actually in vd's DTL.
1810 * This prevents spurious resilvering with nested replication.
1811 * For example, given a mirror of mirrors, (A+B)+(C+D), if only
1812 * A is out of date, we'll read from C+D, then use the data to
1813 * resilver A+B -- but we don't actually want to resilver B, just A.
1814 * The top-level mirror has no way to know this, so instead we just
1815 * discard unnecessary repairs as we work our way down the vdev tree.
1816 * The same logic applies to any form of nested replication:
1817 * ditto + mirror, RAID-Z + replacing, etc. This covers them all.
1819 if ((zio
->io_flags
& ZIO_FLAG_IO_REPAIR
) &&
1820 !(zio
->io_flags
& ZIO_FLAG_SELF_HEAL
) &&
1821 zio
->io_txg
!= 0 && /* not a delegated i/o */
1822 !vdev_dtl_contains(vd
, DTL_PARTIAL
, zio
->io_txg
, 1)) {
1823 ASSERT(zio
->io_type
== ZIO_TYPE_WRITE
);
1824 zio_vdev_io_bypass(zio
);
1825 return (ZIO_PIPELINE_CONTINUE
);
1828 if (vd
->vdev_ops
->vdev_op_leaf
&&
1829 (zio
->io_type
== ZIO_TYPE_READ
|| zio
->io_type
== ZIO_TYPE_WRITE
)) {
1831 if (zio
->io_type
== ZIO_TYPE_READ
&& vdev_cache_read(zio
) == 0)
1832 return (ZIO_PIPELINE_CONTINUE
);
1834 if ((zio
= vdev_queue_io(zio
)) == NULL
)
1835 return (ZIO_PIPELINE_STOP
);
1837 if (!vdev_accessible(vd
, zio
)) {
1838 zio
->io_error
= ENXIO
;
1840 return (ZIO_PIPELINE_STOP
);
1844 return (vd
->vdev_ops
->vdev_op_io_start(zio
));
1848 zio_vdev_io_done(zio_t
*zio
)
1850 vdev_t
*vd
= zio
->io_vd
;
1851 vdev_ops_t
*ops
= vd
? vd
->vdev_ops
: &vdev_mirror_ops
;
1852 boolean_t unexpected_error
= B_FALSE
;
1854 if (zio_wait_for_children(zio
, ZIO_CHILD_VDEV
, ZIO_WAIT_DONE
))
1855 return (ZIO_PIPELINE_STOP
);
1857 ASSERT(zio
->io_type
== ZIO_TYPE_READ
|| zio
->io_type
== ZIO_TYPE_WRITE
);
1859 if (vd
!= NULL
&& vd
->vdev_ops
->vdev_op_leaf
) {
1861 vdev_queue_io_done(zio
);
1863 if (zio
->io_type
== ZIO_TYPE_WRITE
)
1864 vdev_cache_write(zio
);
1866 if (zio_injection_enabled
&& zio
->io_error
== 0)
1867 zio
->io_error
= zio_handle_device_injection(vd
, EIO
);
1869 if (zio_injection_enabled
&& zio
->io_error
== 0)
1870 zio
->io_error
= zio_handle_label_injection(zio
, EIO
);
1872 if (zio
->io_error
) {
1873 if (!vdev_accessible(vd
, zio
)) {
1874 zio
->io_error
= ENXIO
;
1876 unexpected_error
= B_TRUE
;
1881 ops
->vdev_op_io_done(zio
);
1883 if (unexpected_error
)
1884 VERIFY(vdev_probe(vd
, zio
) == NULL
);
1886 return (ZIO_PIPELINE_CONTINUE
);
1890 zio_vdev_io_assess(zio_t
*zio
)
1892 vdev_t
*vd
= zio
->io_vd
;
1894 if (zio_wait_for_children(zio
, ZIO_CHILD_VDEV
, ZIO_WAIT_DONE
))
1895 return (ZIO_PIPELINE_STOP
);
1897 if (vd
== NULL
&& !(zio
->io_flags
& ZIO_FLAG_CONFIG_WRITER
))
1898 spa_config_exit(zio
->io_spa
, SCL_ZIO
, zio
);
1900 if (zio
->io_vsd
!= NULL
) {
1901 zio
->io_vsd_free(zio
);
1905 if (zio_injection_enabled
&& zio
->io_error
== 0)
1906 zio
->io_error
= zio_handle_fault_injection(zio
, EIO
);
1909 * If the I/O failed, determine whether we should attempt to retry it.
1911 if (zio
->io_error
&& vd
== NULL
&&
1912 !(zio
->io_flags
& (ZIO_FLAG_DONT_RETRY
| ZIO_FLAG_IO_RETRY
))) {
1913 ASSERT(!(zio
->io_flags
& ZIO_FLAG_DONT_QUEUE
)); /* not a leaf */
1914 ASSERT(!(zio
->io_flags
& ZIO_FLAG_IO_BYPASS
)); /* not a leaf */
1916 zio
->io_flags
|= ZIO_FLAG_IO_RETRY
|
1917 ZIO_FLAG_DONT_CACHE
| ZIO_FLAG_DONT_AGGREGATE
;
1918 zio
->io_stage
= ZIO_STAGE_VDEV_IO_START
- 1;
1919 zio_taskq_dispatch(zio
, ZIO_TASKQ_ISSUE
);
1920 return (ZIO_PIPELINE_STOP
);
1924 * If we got an error on a leaf device, convert it to ENXIO
1925 * if the device is not accessible at all.
1927 if (zio
->io_error
&& vd
!= NULL
&& vd
->vdev_ops
->vdev_op_leaf
&&
1928 !vdev_accessible(vd
, zio
))
1929 zio
->io_error
= ENXIO
;
1932 * If we can't write to an interior vdev (mirror or RAID-Z),
1933 * set vdev_cant_write so that we stop trying to allocate from it.
1935 if (zio
->io_error
== ENXIO
&& zio
->io_type
== ZIO_TYPE_WRITE
&&
1936 vd
!= NULL
&& !vd
->vdev_ops
->vdev_op_leaf
)
1937 vd
->vdev_cant_write
= B_TRUE
;
1940 zio
->io_pipeline
= ZIO_INTERLOCK_PIPELINE
;
1942 return (ZIO_PIPELINE_CONTINUE
);
1946 zio_vdev_io_reissue(zio_t
*zio
)
1948 ASSERT(zio
->io_stage
== ZIO_STAGE_VDEV_IO_START
);
1949 ASSERT(zio
->io_error
== 0);
1955 zio_vdev_io_redone(zio_t
*zio
)
1957 ASSERT(zio
->io_stage
== ZIO_STAGE_VDEV_IO_DONE
);
1963 zio_vdev_io_bypass(zio_t
*zio
)
1965 ASSERT(zio
->io_stage
== ZIO_STAGE_VDEV_IO_START
);
1966 ASSERT(zio
->io_error
== 0);
1968 zio
->io_flags
|= ZIO_FLAG_IO_BYPASS
;
1969 zio
->io_stage
= ZIO_STAGE_VDEV_IO_ASSESS
- 1;
1973 * ==========================================================================
1974 * Generate and verify checksums
1975 * ==========================================================================
1978 zio_checksum_generate(zio_t
*zio
)
1980 blkptr_t
*bp
= zio
->io_bp
;
1981 enum zio_checksum checksum
;
1985 * This is zio_write_phys().
1986 * We're either generating a label checksum, or none at all.
1988 checksum
= zio
->io_prop
.zp_checksum
;
1990 if (checksum
== ZIO_CHECKSUM_OFF
)
1991 return (ZIO_PIPELINE_CONTINUE
);
1993 ASSERT(checksum
== ZIO_CHECKSUM_LABEL
);
1995 if (BP_IS_GANG(bp
) && zio
->io_child_type
== ZIO_CHILD_GANG
) {
1996 ASSERT(!IO_IS_ALLOCATING(zio
));
1997 checksum
= ZIO_CHECKSUM_GANG_HEADER
;
1999 checksum
= BP_GET_CHECKSUM(bp
);
2003 zio_checksum_compute(zio
, checksum
, zio
->io_data
, zio
->io_size
);
2005 return (ZIO_PIPELINE_CONTINUE
);
2009 zio_checksum_verify(zio_t
*zio
)
2011 blkptr_t
*bp
= zio
->io_bp
;
2016 * This is zio_read_phys().
2017 * We're either verifying a label checksum, or nothing at all.
2019 if (zio
->io_prop
.zp_checksum
== ZIO_CHECKSUM_OFF
)
2020 return (ZIO_PIPELINE_CONTINUE
);
2022 ASSERT(zio
->io_prop
.zp_checksum
== ZIO_CHECKSUM_LABEL
);
2025 if ((error
= zio_checksum_error(zio
)) != 0) {
2026 zio
->io_error
= error
;
2027 if (!(zio
->io_flags
& ZIO_FLAG_SPECULATIVE
)) {
2028 zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM
,
2029 zio
->io_spa
, zio
->io_vd
, zio
, 0, 0);
2033 return (ZIO_PIPELINE_CONTINUE
);
2037 * Called by RAID-Z to ensure we don't compute the checksum twice.
2040 zio_checksum_verified(zio_t
*zio
)
2042 zio
->io_pipeline
&= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY
);
/*
 * ==========================================================================
 * Error rank.  Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
 * An error of 0 indicates success.  ENXIO indicates whole-device failure,
 * which may be transient (e.g. unplugged) or permanent.  ECKSUM and EIO
 * indicate errors that are specific to one I/O, and most likely permanent.
 * Any other error is presumed to be worse because we weren't expecting it.
 * ==========================================================================
 */
int
zio_worst_error(int e1, int e2)
{
	static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
	int r1, r2;

	for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
		if (e1 == zio_error_rank[r1])
			break;

	for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
		if (e2 == zio_error_rank[r2])
			break;

	return (r1 > r2 ? e1 : e2);
}
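/*
 * Worked example (illustrative): with the table above, ENXIO ranks 1,
 * ECKSUM ranks 2, EIO ranks 3, and anything unlisted falls off the end
 * of the loop with rank 4.  So:
 *
 *	zio_worst_error(0, ENXIO) == ENXIO	(any error beats success)
 *	zio_worst_error(ENXIO, ECKSUM) == ECKSUM (per-I/O beats whole-device)
 *	zio_worst_error(EIO, EILSEQ) == EILSEQ	(unexpected errors rank worst)
 */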
/*
 * ==========================================================================
 * I/O completion
 * ==========================================================================
 */
static int
zio_ready(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	zio_t *pio, *pio_next;

	if (zio->io_ready) {
		if (BP_IS_GANG(bp) &&
		    zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_READY))
			return (ZIO_PIPELINE_STOP);

		ASSERT(IO_IS_ALLOCATING(zio));
		ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
		ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);

		zio->io_ready(zio);
	}

	if (bp != NULL && bp != &zio->io_bp_copy)
		zio->io_bp_copy = *bp;

	if (zio->io_error)
		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;

	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_READY] = 1;
	pio = zio_walk_parents(zio);
	mutex_exit(&zio->io_lock);

	/*
	 * As we notify zio's parents, new parents could be added.
	 * New parents go to the head of zio's io_parent_list, however,
	 * so we will (correctly) not notify them.  The remainder of zio's
	 * io_parent_list, from 'pio_next' onward, cannot change because
	 * all parents must wait for us to be done before they can be done.
	 */
	for (; pio != NULL; pio = pio_next) {
		pio_next = zio_walk_parents(zio);
		zio_notify_parent(pio, zio, ZIO_WAIT_READY);
	}

	return (ZIO_PIPELINE_CONTINUE);
}
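/*
 * Illustrative sketch of why the snapshot above is safe: a concurrent
 * zio_add_child(new_pio, zio) links the new parent at the head of
 * zio->io_parent_list ('new_pio' is a hypothetical name):
 *
 *	head -> [new_pio] -> [pio] -> [pio_next] -> ...
 *
 * The loop resumed at 'pio' therefore never notifies new_pio, while the
 * suffix from 'pio_next' onward cannot change out from under us.
 */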
static int
zio_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	zio_t *lio = zio->io_logical;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;
	uint64_t psize = zio->io_size;
	zio_t *pio, *pio_next;

	/*
	 * If our children haven't all completed,
	 * wait for them and then repeat this pipeline stage.
	 */
	if (zio_wait_for_children(zio, ZIO_CHILD_VDEV, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_GANG, ZIO_WAIT_DONE) ||
	    zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE))
		return (ZIO_PIPELINE_STOP);
	for (int c = 0; c < ZIO_CHILD_TYPES; c++)
		for (int w = 0; w < ZIO_WAIT_TYPES; w++)
			ASSERT(zio->io_children[c][w] == 0);
	if (bp != NULL) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bp->blk_pad[2] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 ||
		    (bp == zio_unique_parent(zio)->io_bp));
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT3U(zio->io_prop.zp_ndvas, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
	}
	/*
	 * If there were child vdev or gang errors, they apply to us now.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
	zio_inherit_child_errors(zio, ZIO_CHILD_GANG);

	zio_pop_transforms(zio);	/* note: may set zio->io_error */

	vdev_stat_update(zio, psize);
	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level.  We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO ||
		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) && zio == lio) {
			/*
			 * For logical I/O requests, tell the SPA to log the
			 * error and generate a logical data ereport.
			 */
			spa_log_error(spa, zio);
			zfs_ereport_post(FM_EREPORT_ZFS_DATA, spa, NULL, zio,
			    0, 0);
		}
	}
	if (zio->io_error && zio == lio) {
		/*
		 * Determine whether zio should be reexecuted.  This will
		 * propagate all the way to the root via zio_notify_parent().
		 */
		ASSERT(vd == NULL && bp != NULL);

		if (IO_IS_ALLOCATING(zio))
			if (zio->io_error != ENOSPC)
				zio->io_reexecute |= ZIO_REEXECUTE_NOW;
			else
				zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if ((zio->io_type == ZIO_TYPE_READ ||
		    zio->io_type == ZIO_TYPE_FREE) &&
		    zio->io_error == ENXIO &&
		    spa->spa_load_state == SPA_LOAD_NONE &&
		    spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;

		if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
			zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
	}
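	/*
	 * Aside (illustrative): spa_get_failmode() reflects the pool's
	 * 'failmode' property, e.g.
	 *
	 *	zpool set failmode=continue tank
	 *
	 * which makes runtime ENXIO on reads and frees return an error to
	 * the caller rather than suspending the pool as above.  ('tank' is
	 * a placeholder pool name.)
	 */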
	/*
	 * If there were logical child errors, they apply to us now.
	 * We defer this until now to avoid conflating logical child
	 * errors with errors that happened to the zio itself when
	 * updating vdev stats and reporting FMA events above.
	 */
	zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
	if (zio->io_reexecute) {
		/*
		 * This is a logical I/O that wants to reexecute.
		 *
		 * Reexecute is top-down.  When an i/o fails, if it's not
		 * the root, it simply notifies its parent and sticks around.
		 * The parent, seeing that it still has children in zio_done(),
		 * does the same.  This percolates all the way up to the root.
		 * The root i/o will reexecute or suspend the entire tree.
		 *
		 * This approach ensures that zio_reexecute() honors
		 * all the original i/o dependency relationships, e.g.
		 * parents not executing until children are ready.
		 */
		ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);

		if (IO_IS_ALLOCATING(zio))
			zio_dva_unallocate(zio, zio->io_gang_tree, bp);

		zio_gang_tree_free(&zio->io_gang_tree);

		mutex_enter(&zio->io_lock);
		zio->io_state[ZIO_WAIT_DONE] = 1;
		mutex_exit(&zio->io_lock);

		if ((pio = zio_unique_parent(zio)) != NULL) {
			/*
			 * We're not a root i/o, so there's nothing to do
			 * but notify our parent.  Don't propagate errors
			 * upward since we haven't permanently failed yet.
			 */
			zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
			zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
		} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
			/*
			 * We'd fail again if we reexecuted now, so suspend
			 * until conditions improve (e.g. device comes online).
			 */
			zio_suspend(spa, zio);
		} else {
			/*
			 * Reexecution is potentially a huge amount of work.
			 * Hand it off to the otherwise-unused claim taskq.
			 */
			(void) taskq_dispatch(
			    spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE],
			    (task_func_t *)zio_reexecute, zio, TQ_SLEEP);
		}

		return (ZIO_PIPELINE_STOP);
	}
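	/*
	 * Recovery note (illustrative): a tree parked by zio_suspend()
	 * above is kicked off again by zio_resume(), which an administrator
	 * typically reaches via 'zpool clear' once the offending device is
	 * back online.
	 */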
	ASSERT(zio_walk_children(zio) == NULL);
	ASSERT(zio->io_reexecute == 0);
	ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));

	/*
	 * It is the responsibility of the done callback to ensure that this
	 * particular zio is no longer discoverable for adoption, and as
	 * such, cannot acquire any new parents.
	 */
	if (zio->io_done)
		zio->io_done(zio);

	zio_gang_tree_free(&zio->io_gang_tree);
	mutex_enter(&zio->io_lock);
	zio->io_state[ZIO_WAIT_DONE] = 1;
	mutex_exit(&zio->io_lock);

	for (pio = zio_walk_parents(zio); pio != NULL; pio = pio_next) {
		zio_link_t *zl = zio->io_walk_link;
		pio_next = zio_walk_parents(zio);
		zio_remove_child(pio, zio, zl);
		zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
	}

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		zio->io_executor = NULL;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		zio_destroy(zio);
	}

	return (ZIO_PIPELINE_STOP);
}
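/*
 * For context (a sketch of the other side of the io_waiter handshake):
 * zio_wait() blocks on io_cv until zio_done() clears io_executor above,
 * roughly
 *
 *	mutex_enter(&zio->io_lock);
 *	while (zio->io_executor != NULL)
 *		cv_wait(&zio->io_cv, &zio->io_lock);
 *	mutex_exit(&zio->io_lock);
 *
 * after which the waiter, not zio_done(), destroys the zio.
 */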
/*
 * ==========================================================================
 * I/O pipeline definition
 * ==========================================================================
 */
static zio_pipe_stage_t *zio_pipeline[ZIO_STAGES] = {
	NULL,
	zio_issue_async,
	zio_read_bp_init,
	zio_write_bp_init,
	zio_checksum_generate,
	zio_gang_assemble,
	zio_gang_issue,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_checksum_verify,
	zio_done
};
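/*
 * Usage sketch (abridged from zio_execute() earlier in this file): the
 * pipeline is driven by walking io_pipeline's stage bits and indexing
 * this table, approximately
 *
 *	while (zio->io_stage < ZIO_STAGE_DONE) {
 *		uint32_t stage = zio->io_stage + 1;
 *		while (((1U << stage) & zio->io_pipeline) == 0)
 *			stage++;
 *		zio->io_stage = stage;
 *		rv = zio_pipeline[stage](zio);
 *		if (rv == ZIO_PIPELINE_STOP)
 *			return;
 *		ASSERT(rv == ZIO_PIPELINE_CONTINUE);
 *	}
 *
 * which is also why stages that want to re-run set io_stage to
 * 'desired stage - 1' before redispatching.
 */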