/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */
/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault. These are kept in a global list. Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device level injection is done using the 'zi_guid' field. If this is set, it
 * means that the error is destined for a particular device, not a piece of
 * data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
 */
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_objset.h>
#include <sys/fs/zfs.h>

uint32_t zio_injection_enabled = 0;
/*
 * Data describing each zinject handler registered on the system, and
 * contains the list node linking the handler in the global zinject
 * handler list.
 */
typedef struct inject_handler {
	int		zi_id;
	spa_t		*zi_spa;
	zinject_record_t zi_record;
	uint64_t	*zi_lanes;
	int		zi_next_lane;
	list_node_t	zi_link;
} inject_handler_t;

/*
 * List of all zinject handlers registered on the system, protected by
 * the inject_lock defined below.
 */
static list_t inject_handlers;
/*
 * This protects insertion into, and traversal of, the inject handler
 * list defined above; as well as the inject_delay_count. Any time a
 * handler is inserted or removed from the list, this lock should be
 * taken as a RW_WRITER; and any time traversal is done over the list
 * (without modification to it) this lock should be taken as a RW_READER.
 */
static krwlock_t inject_lock;

/*
 * This holds the number of zinject delay handlers that have been
 * registered on the system. It is protected by the inject_lock defined
 * above. Thus modifications to this count must be a RW_WRITER of the
 * inject_lock, and reads of this count must be (at least) a RW_READER
 * of the inject_lock.
 */
static int inject_delay_count = 0;

/*
 * This lock is used only in zio_handle_io_delay(), refer to the comment
 * in that function for more details.
 */
static kmutex_t inject_delay_mtx;
/*
 * Used to assign unique identifying numbers to each new zinject handler.
 */
static int inject_next_id = 1;
/*
 * Test if the requested frequency was triggered
 */
static boolean_t
freq_triggered(uint32_t frequency)
{
	/*
	 * zero implies always (100%)
	 */
	if (frequency == 0)
		return (B_TRUE);

	/*
	 * Note: we still handle legacy (unscaled) frequency values
	 */
	uint32_t maximum = (frequency <= 100) ? 100 : ZI_PERCENTAGE_MAX;

	return (spa_get_random(maximum) < frequency);
}
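
/*
 * Illustration (not compiled): a frequency of 25 on the legacy scale
 * fires roughly 25 times out of 100, while a scaled value of
 * ZI_PERCENTAGE_MAX / 4 fires with the same ~25% probability at much
 * finer granularity. A minimal sketch of the same decision, taking the
 * random value as a parameter instead of calling spa_get_random():
 */
#if 0
static int
freq_triggered_sketch(uint32_t frequency, uint32_t random_value)
{
	/* zero means the fault always fires */
	if (frequency == 0)
		return (1);

	/* values <= 100 are legacy percentages; larger ones are scaled */
	uint32_t maximum = (frequency <= 100) ? 100 : ZI_PERCENTAGE_MAX;

	return (random_value % maximum < frequency);
}
#endif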
/*
 * Returns true if the given record matches the I/O in progress.
 */
static boolean_t
zio_match_handler(const zbookmark_phys_t *zb, uint64_t type,
    zinject_record_t *record, int error)
{
	/*
	 * Check for a match against the MOS, which is based on type
	 */
	if (zb->zb_objset == DMU_META_OBJSET &&
	    record->zi_objset == DMU_META_OBJSET &&
	    record->zi_object == DMU_META_DNODE_OBJECT) {
		if (record->zi_type == DMU_OT_NONE ||
		    type == record->zi_type)
			return (freq_triggered(record->zi_freq));
		else
			return (B_FALSE);
	}

	/*
	 * Check for an exact match.
	 */
	if (zb->zb_objset == record->zi_objset &&
	    zb->zb_object == record->zi_object &&
	    zb->zb_level == record->zi_level &&
	    zb->zb_blkid >= record->zi_start &&
	    zb->zb_blkid <= record->zi_end &&
	    error == record->zi_error)
		return (freq_triggered(record->zi_freq));

	return (B_FALSE);
}
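
/*
 * Illustration (not compiled): a hypothetical record that would match
 * reads of blocks 0-63 at level 0 of object 42 in objset 5, injecting
 * EIO on every matching I/O (zi_freq == 0 means always trigger). The
 * objset/object/block numbers are made up for the example:
 */
#if 0
static void
example_record_setup(zinject_record_t *record)
{
	bzero(record, sizeof (*record));
	record->zi_objset = 5;		/* hypothetical objset id */
	record->zi_object = 42;		/* hypothetical object number */
	record->zi_level = 0;		/* leaf blocks */
	record->zi_start = 0;		/* first matching blkid */
	record->zi_end = 63;		/* last matching blkid */
	record->zi_error = EIO;		/* errno to inject */
	record->zi_freq = 0;		/* 0 => trigger 100% of the time */
	record->zi_cmd = ZINJECT_DATA_FAULT;
}
#endif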
/*
 * Panic the system when a config change happens in the function
 * specified by tag.
 */
void
zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa)
			continue;

		if (handler->zi_record.zi_type == type &&
		    strcmp(tag, handler->zi_record.zi_func) == 0)
			panic("Panic requested in function %s\n", tag);
	}

	rw_exit(&inject_lock);
}
/*
 * Inject a decryption failure. Decryption failures can occur in
 * both the ARC and the ZIO layers.
 */
int
zio_handle_decrypt_injection(spa_t *spa, const zbookmark_phys_t *zb,
    uint64_t type, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DECRYPT_FAULT)
			continue;

		if (zio_match_handler(zb, type, &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);
	return (ret);
}
/*
 * Determine if the I/O in question should return failure. Returns the errno
 * to be returned to the caller.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	/*
	 * Ignore I/O not associated with any logical data.
	 */
	if (zio->io_logical == NULL)
		return (0);

	/*
	 * Currently, we only support fault injection on reads.
	 */
	if (zio->io_type != ZIO_TYPE_READ)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
			continue;

		/* If this handler matches, return EIO */
		if (zio_match_handler(&zio->io_logical->io_bookmark,
		    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
		    &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}
/*
 * Determine if the zio is part of a label update and has an injection
 * handler associated with that portion of the label. Currently, we
 * allow error injection in either the nvlist or the uberblock region
 * of the vdev label.
 */
int
zio_handle_label_injection(zio_t *zio, int error)
{
	inject_handler_t *handler;
	vdev_t *vd = zio->io_vd;
	uint64_t offset = zio->io_offset;
	int label;
	int ret = 0;

	if (offset >= VDEV_LABEL_START_SIZE &&
	    offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		uint64_t start = handler->zi_record.zi_start;
		uint64_t end = handler->zi_record.zi_end;

		if (handler->zi_record.zi_cmd != ZINJECT_LABEL_FAULT)
			continue;

		/*
		 * The injection region is the relative offsets within a
		 * vdev label. We must determine the label which is being
		 * updated and adjust our region accordingly.
		 */
		label = vdev_label_number(vd->vdev_psize, offset);
		start = vdev_label_offset(vd->vdev_psize, label, start);
		end = vdev_label_offset(vd->vdev_psize, label, end);

		if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
		    (offset >= start && offset <= end)) {
			ret = error;
			break;
		}
	}
	rw_exit(&inject_lock);

	return (ret);
}
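
/*
 * Illustration (not compiled): zi_start/zi_end are offsets relative to
 * a single label, so a record covering, say, the uberblock area must be
 * remapped for whichever of the four labels this write targets. A
 * hypothetical check for a write landing inside the remapped region:
 */
#if 0
static int
example_label_match(vdev_t *vd, uint64_t offset, uint64_t rel_start,
    uint64_t rel_end)
{
	/* which of the 4 labels contains this offset? */
	int label = vdev_label_number(vd->vdev_psize, offset);

	/* translate label-relative bounds to absolute vdev offsets */
	uint64_t start = vdev_label_offset(vd->vdev_psize, label, rel_start);
	uint64_t end = vdev_label_offset(vd->vdev_psize, label, rel_end);

	return (offset >= start && offset <= end);
}
#endif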
/*ARGSUSED*/
static int
zio_inject_bitflip_cb(void *data, size_t len, void *private)
{
	ASSERTV(zio_t *zio = private);
	uint8_t *buffer = data;
	uint_t byte = spa_get_random(len);

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/* flip a single random bit in an abd data buffer */
	buffer[byte] ^= 1 << spa_get_random(8);

	return (1);	/* stop after first flip */
}
static int
zio_handle_device_injection_impl(vdev_t *vd, zio_t *zio, int err1, int err2)
{
	inject_handler_t *handler;
	int ret = 0;

	/*
	 * We skip over faults in the labels unless it's during
	 * device open (i.e. zio == NULL).
	 */
	if (zio != NULL) {
		uint64_t offset = zio->io_offset;

		if (offset < VDEV_LABEL_START_SIZE ||
		    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
			return (0);
	}

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (handler->zi_record.zi_cmd != ZINJECT_DEVICE_FAULT)
			continue;

		if (vd->vdev_guid == handler->zi_record.zi_guid) {
			if (handler->zi_record.zi_failfast &&
			    (zio == NULL || (zio->io_flags &
			    (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) {
				continue;
			}

			/* Handle type specific I/O failures */
			if (zio != NULL &&
			    handler->zi_record.zi_iotype != ZIO_TYPES &&
			    handler->zi_record.zi_iotype != zio->io_type)
				continue;

			if (handler->zi_record.zi_error == err1 ||
			    handler->zi_record.zi_error == err2) {
				/*
				 * limit error injection if requested
				 */
				if (!freq_triggered(handler->zi_record.zi_freq))
					continue;

				/*
				 * For a failed open, pretend like the device
				 * has gone away.
				 */
				if (err1 == ENXIO)
					vd->vdev_stat.vs_aux =
					    VDEV_AUX_OPEN_FAILED;

				/*
				 * Treat these errors as if they had been
				 * retried so that all the appropriate stats
				 * and FMA events are generated.
				 */
				if (!handler->zi_record.zi_failfast &&
				    zio != NULL)
					zio->io_flags |= ZIO_FLAG_IO_RETRY;

				/*
				 * EILSEQ means flip a bit after a read
				 */
				if (handler->zi_record.zi_error == EILSEQ) {
					if (zio == NULL)
						break;

					/* locate buffer data and flip a bit */
					(void) abd_iterate_func(zio->io_abd, 0,
					    zio->io_size, zio_inject_bitflip_cb,
					    zio);
					break;
				}

				ret = handler->zi_record.zi_error;
				break;
			}
			if (handler->zi_record.zi_error == ENXIO) {
				ret = SET_ERROR(EIO);
				break;
			}
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}
int
zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
{
	return (zio_handle_device_injection_impl(vd, zio, error, INT_MAX));
}

int
zio_handle_device_injections(vdev_t *vd, zio_t *zio, int err1, int err2)
{
	return (zio_handle_device_injection_impl(vd, zio, err1, err2));
}
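
/*
 * Illustration (not compiled): callers that can fail with two distinct
 * errnos check both in one pass, while single-errno callers use the
 * INT_MAX-padded wrapper. A hypothetical caller (the function name and
 * errno choices here are made up for the example):
 */
#if 0
static int
example_device_error_check(vdev_t *vd, zio_t *zio)
{
	/* match a handler registered with either ENXIO or EIO */
	int err = zio_handle_device_injections(vd, zio, ENXIO, EIO);

	/* or match only a single injected errno */
	if (err == 0)
		err = zio_handle_device_injection(vd, zio, ECKSUM);

	return (err);
}
#endif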
/*
 * Simulate hardware that ignores cache flushes. For requested number
 * of seconds nix the actual writing to disk.
 */
void
zio_handle_ignored_writes(zio_t *zio)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		/*
		 * Positive duration implies # of seconds, negative
		 * a number of txgs
		 */
		if (handler->zi_record.zi_timer == 0) {
			if (handler->zi_record.zi_duration > 0)
				handler->zi_record.zi_timer = ddi_get_lbolt64();
			else
				handler->zi_record.zi_timer = zio->io_txg;
		}

		/* Have a "problem" writing 60% of the time */
		if (spa_get_random(100) < 60)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		break;
	}

	rw_exit(&inject_lock);
}
void
spa_handle_ignored_writes(spa_t *spa)
{
	inject_handler_t *handler;

	if (zio_injection_enabled == 0)
		return;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		if (handler->zi_record.zi_duration > 0) {
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    ddi_time_after64(
			    (int64_t)handler->zi_record.zi_timer +
			    handler->zi_record.zi_duration * hz,
			    ddi_get_lbolt64()));
		} else {
			/* duration is negative so the subtraction here adds */
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer -
			    handler->zi_record.zi_duration >=
			    spa_syncing_txg(spa));
		}
	}

	rw_exit(&inject_lock);
}
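
/*
 * Illustration (not compiled): zi_duration > 0 counts wall-clock
 * seconds (zi_timer records the lbolt at first use), while
 * zi_duration < 0 counts txgs (zi_timer records the starting txg).
 * A sketch of the two expiry checks with hypothetical inputs:
 */
#if 0
static int
example_ignored_writes_expired(zinject_record_t *r, int64_t now_lbolt,
    uint64_t syncing_txg)
{
	if (r->zi_duration > 0)
		return (now_lbolt >= (int64_t)r->zi_timer +
		    r->zi_duration * hz);

	/* duration is negative, so subtracting it adds txgs */
	return (syncing_txg >= r->zi_timer - r->zi_duration);
}
#endif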
hrtime_t
zio_handle_io_delay(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	inject_handler_t *min_handler = NULL;
	hrtime_t min_target = 0;

	rw_enter(&inject_lock, RW_READER);

	/*
	 * inject_delay_count is a subset of zio_injection_enabled that
	 * is only incremented for delay handlers. These checks are
	 * mainly added to remind the reader why we're not explicitly
	 * checking zio_injection_enabled like the other functions.
	 */
	IMPLY(inject_delay_count > 0, zio_injection_enabled > 0);
	IMPLY(zio_injection_enabled == 0, inject_delay_count == 0);

	/*
	 * If there aren't any inject delay handlers registered, then we
	 * can short circuit and simply return 0 here. A value of zero
	 * informs zio_delay_interrupt() that this request should not be
	 * delayed. This short circuit keeps us from acquiring the
	 * inject_delay_mutex unnecessarily.
	 */
	if (inject_delay_count == 0) {
		rw_exit(&inject_lock);
		return (0);
	}

	/*
	 * Each inject handler has a number of "lanes" associated with
	 * it. Each lane is able to handle requests independently of one
	 * another, and at a latency defined by the inject handler
	 * record's zi_timer field. Thus if a handler is configured with
	 * a single lane with a 10ms latency, it will delay requests
	 * such that only a single request is completed every 10ms. So,
	 * if more than one request is attempted per each 10ms interval,
	 * the average latency of the requests will be greater than
	 * 10ms; but if only a single request is submitted each 10ms
	 * interval the average latency will be 10ms.
	 *
	 * We need to acquire this mutex to prevent multiple concurrent
	 * threads being assigned to the same lane of a given inject
	 * handler. The mutex allows us to perform the following two
	 * operations atomically:
	 *
	 *	1. determine the minimum handler and minimum target
	 *	   value of all the possible handlers
	 *	2. update that minimum handler's lane array
	 *
	 * Without atomicity, two (or more) threads could pick the same
	 * lane in step (1), and then conflict with each other in step
	 * (2). This could allow a single lane handler to process
	 * multiple requests simultaneously, which shouldn't be possible.
	 */
	mutex_enter(&inject_delay_mtx);

	for (inject_handler_t *handler = list_head(&inject_handlers);
	    handler != NULL; handler = list_next(&inject_handlers, handler)) {
		if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
			continue;

		if (!freq_triggered(handler->zi_record.zi_freq))
			continue;

		if (vd->vdev_guid != handler->zi_record.zi_guid)
			continue;

		/*
		 * Defensive; should never happen as the array allocation
		 * occurs prior to inserting this handler on the list.
		 */
		ASSERT3P(handler->zi_lanes, !=, NULL);

		/*
		 * This should never happen, the zinject command should
		 * prevent a user from setting an IO delay with zero lanes.
		 */
		ASSERT3U(handler->zi_record.zi_nlanes, !=, 0);

		ASSERT3U(handler->zi_record.zi_nlanes, >,
		    handler->zi_next_lane);

		/*
		 * We want to issue this IO to the lane that will become
		 * idle the soonest, so we compare the soonest this
		 * specific handler can complete the IO with all other
		 * handlers, to find the lowest value of all possible
		 * lanes. We then use this lane to submit the request.
		 *
		 * Since each handler has a constant value for its
		 * delay, we can just use the "next" lane for that
		 * handler; as it will always be the lane with the
		 * lowest value for that particular handler (i.e. the
		 * lane that will become idle the soonest). This saves a
		 * scan of each handler's lanes array.
		 *
		 * There are two cases to consider when determining when
		 * this specific IO request should complete. If this
		 * lane is idle, we want to "submit" the request now so
		 * it will complete after zi_timer milliseconds. Thus,
		 * we set the target to now + zi_timer.
		 *
		 * If the lane is busy, we want this request to complete
		 * zi_timer milliseconds after the lane becomes idle.
		 * Since the 'zi_lanes' array holds the time at which
		 * each lane will become idle, we use that value to
		 * determine when this request should complete.
		 */
		hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
		hrtime_t busy = handler->zi_record.zi_timer +
		    handler->zi_lanes[handler->zi_next_lane];
		hrtime_t target = MAX(idle, busy);

		if (min_handler == NULL) {
			min_handler = handler;
			min_target = target;
			continue;
		}

		ASSERT3P(min_handler, !=, NULL);
		ASSERT3U(min_target, !=, 0);

		/*
		 * We don't yet increment the "next lane" variable since
		 * we still might find a lower value lane in another
		 * handler during any remaining iterations. Once we're
		 * sure we've selected the absolute minimum, we'll claim
		 * the lane and increment the handler's "next lane"
		 * field.
		 */

		if (target < min_target) {
			min_handler = handler;
			min_target = target;
		}
	}

	/*
	 * 'min_handler' will be NULL if no IO delays are registered for
	 * this vdev, otherwise it will point to the handler containing
	 * the lane that will become idle the soonest.
	 */
	if (min_handler != NULL) {
		ASSERT3U(min_target, !=, 0);
		min_handler->zi_lanes[min_handler->zi_next_lane] = min_target;

		/*
		 * If we've used all possible lanes for this handler,
		 * loop back and start using the first lane again;
		 * otherwise, just increment the lane index.
		 */
		min_handler->zi_next_lane = (min_handler->zi_next_lane + 1) %
		    min_handler->zi_record.zi_nlanes;
	}

	mutex_exit(&inject_delay_mtx);
	rw_exit(&inject_lock);

	return (min_target);
}
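
/*
 * Illustration (not compiled): with one handler configured for 2 lanes
 * and zi_timer = 10ms, three IOs arriving together at time T target
 * completion at T+10ms (lane 0, idle), T+10ms (lane 1, idle), and
 * T+20ms (lane 0 again, busy until T+10ms). A sketch of the target
 * computation for one handler, with 'now' supplied by the caller:
 */
#if 0
static hrtime_t
example_lane_target(inject_handler_t *h, hrtime_t now)
{
	/* complete zi_timer after now if the lane is idle ... */
	hrtime_t idle = h->zi_record.zi_timer + now;

	/* ... or zi_timer after the lane's previous IO if it's busy */
	hrtime_t busy = h->zi_record.zi_timer +
	    h->zi_lanes[h->zi_next_lane];

	return (MAX(idle, busy));
}
#endif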
/*
 * Create a new handler for the given record. We add it to the list, adding
 * a reference to the spa_t in the process. We increment zio_injection_enabled,
 * which is the switch to trigger all fault injection.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
	inject_handler_t *handler;
	int error;
	spa_t *spa;

	/*
	 * If this is pool-wide metadata, make sure we unload the corresponding
	 * spa_t, so that the next attempt to load it will trigger the fault.
	 * We call spa_reset() to unload the pool appropriately.
	 */
	if (flags & ZINJECT_UNLOAD_SPA)
		if ((error = spa_reset(name)) != 0)
			return (error);

	if (record->zi_cmd == ZINJECT_DELAY_IO) {
		/*
		 * A value of zero for the number of lanes or for the
		 * delay time doesn't make sense.
		 */
		if (record->zi_timer == 0 || record->zi_nlanes == 0)
			return (SET_ERROR(EINVAL));

		/*
		 * The number of lanes is directly mapped to the size of
		 * an array used by the handler. Thus, to ensure the
		 * user doesn't trigger an allocation that's "too large"
		 * we cap the number of lanes here.
		 */
		if (record->zi_nlanes >= UINT16_MAX)
			return (SET_ERROR(EINVAL));
	}

	if (!(flags & ZINJECT_NULL)) {
		/*
		 * spa_inject_ref() will add an injection reference, which will
		 * prevent the pool from being removed from the namespace while
		 * still allowing it to be unloaded.
		 */
		if ((spa = spa_inject_addref(name)) == NULL)
			return (SET_ERROR(ENOENT));

		handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);

		handler->zi_spa = spa;
		handler->zi_record = *record;

		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			handler->zi_lanes = kmem_zalloc(
			    sizeof (*handler->zi_lanes) *
			    handler->zi_record.zi_nlanes, KM_SLEEP);
			handler->zi_next_lane = 0;
		} else {
			handler->zi_lanes = NULL;
			handler->zi_next_lane = 0;
		}

		rw_enter(&inject_lock, RW_WRITER);

		/*
		 * We can't move this increment into the conditional
		 * above because we need to hold the RW_WRITER lock of
		 * inject_lock, and we don't want to hold that while
		 * allocating the handler's zi_lanes array.
		 */
		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			ASSERT3S(inject_delay_count, >=, 0);
			inject_delay_count++;
			ASSERT3S(inject_delay_count, >, 0);
		}

		*id = handler->zi_id = inject_next_id++;
		list_insert_tail(&inject_handlers, handler);
		atomic_inc_32(&zio_injection_enabled);

		rw_exit(&inject_lock);
	}

	/*
	 * Flush the ARC, so that any attempts to read this data will end up
	 * going to the ZIO layer. Note that this is a little overkill, but
	 * we don't have the necessary ARC interfaces to do anything else, and
	 * fault injection isn't a performance critical path.
	 */
	if (flags & ZINJECT_FLUSH_ARC)
		/*
		 * We must use FALSE to ensure arc_flush returns, since
		 * we're not preventing concurrent ARC insertions.
		 */
		arc_flush(NULL, FALSE);

	return (0);
}
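
/*
 * Illustration (not compiled): a hypothetical registration of a 2-lane,
 * always-on delay handler for the vdev with the given guid, mirroring
 * the record the zinject ioctl path fills in from userspace. The
 * function name and parameter choices are made up for the example:
 */
#if 0
static int
example_register_delay(char *pool, uint64_t vdev_guid, hrtime_t delay)
{
	zinject_record_t record = { 0 };
	int id;

	record.zi_cmd = ZINJECT_DELAY_IO;
	record.zi_guid = vdev_guid;	/* target device */
	record.zi_timer = delay;	/* per-IO latency (lane math above) */
	record.zi_nlanes = 2;		/* two concurrent lanes */
	record.zi_freq = 0;		/* always trigger */

	return (zio_inject_fault(pool, 0, &id, &record));
}
#endif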
/*
 * Returns the next record with an ID greater than that supplied to the
 * function. Used to iterate over all handlers in the system.
 */
int
zio_inject_list_next(int *id, char *name, size_t buflen,
    zinject_record_t *record)
{
	inject_handler_t *handler;
	int ret;

	mutex_enter(&spa_namespace_lock);
	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id > *id)
			break;

	if (handler) {
		*record = handler->zi_record;
		*id = handler->zi_id;
		(void) strncpy(name, spa_name(handler->zi_spa), buflen);
		ret = 0;
	} else {
		ret = SET_ERROR(ENOENT);
	}

	rw_exit(&inject_lock);
	mutex_exit(&spa_namespace_lock);

	return (ret);
}
/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_WRITER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id == id)
			break;

	if (handler == NULL) {
		rw_exit(&inject_lock);
		return (SET_ERROR(ENOENT));
	}

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3S(inject_delay_count, >, 0);
		inject_delay_count--;
		ASSERT3S(inject_delay_count, >=, 0);
	}

	list_remove(&inject_handlers, handler);
	rw_exit(&inject_lock);

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3P(handler->zi_lanes, !=, NULL);
		kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
		    handler->zi_record.zi_nlanes);
	} else {
		ASSERT3P(handler->zi_lanes, ==, NULL);
	}

	spa_inject_delref(handler->zi_spa);
	kmem_free(handler, sizeof (inject_handler_t));
	atomic_dec_32(&zio_injection_enabled);

	return (0);
}
void
zio_inject_init(void)
{
	rw_init(&inject_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&inject_delay_mtx, NULL, MUTEX_DEFAULT, NULL);
	list_create(&inject_handlers, sizeof (inject_handler_t),
	    offsetof(inject_handler_t, zi_link));
}
void
zio_inject_fini(void)
{
	list_destroy(&inject_handlers);
	mutex_destroy(&inject_delay_mtx);
	rw_destroy(&inject_lock);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zio_injection_enabled);
EXPORT_SYMBOL(zio_inject_fault);
EXPORT_SYMBOL(zio_inject_list_next);
EXPORT_SYMBOL(zio_clear_fault);
EXPORT_SYMBOL(zio_handle_fault_injection);
EXPORT_SYMBOL(zio_handle_device_injection);
EXPORT_SYMBOL(zio_handle_label_injection);
#endif