 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault.  These are kept in a global list.  Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device level injection is done using the 'zi_guid' field.  If this is set,
 * it means that the error is destined for a particular device, not a piece of
 * data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
 */

#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/fs/zfs.h>

uint32_t zio_injection_enabled = 0;

/*
 * Data describing each zinject handler registered on the system; it also
 * contains the list node linking the handler into the global zinject
 * handler list.
 */
typedef struct inject_handler {
	int		zi_id;
	spa_t		*zi_spa;
	zinject_record_t zi_record;
	uint64_t	*zi_lanes;
	int		zi_next_lane;
	list_node_t	zi_link;
} inject_handler_t;

/*
 * List of all zinject handlers registered on the system, protected by
 * the inject_lock defined below.
 */
static list_t inject_handlers;

/*
 * This protects insertion into, and traversal of, the inject handler
 * list defined above, as well as the inject_delay_count. Any time a
 * handler is inserted or removed from the list, this lock should be
 * taken as a RW_WRITER; and any time traversal is done over the list
 * (without modification to it) this lock should be taken as a RW_READER.
 */
static krwlock_t inject_lock;

/*
 * This holds the number of zinject delay handlers that have been
 * registered on the system. It is protected by the inject_lock defined
 * above. Thus modifications to this count must be a RW_WRITER of the
 * inject_lock, and reads of this count must be (at least) a RW_READER
 * of the inject_lock.
 */
static int inject_delay_count = 0;

/*
 * This lock is used only in zio_handle_io_delay(); refer to the comment
 * in that function for more details.
 */
static kmutex_t inject_delay_mtx;

/*
 * Used to assign unique identifying numbers to each new zinject handler.
 */
static int inject_next_id = 1;

/*
 * Test if the requested frequency was triggered
 */
static boolean_t
freq_triggered(uint32_t frequency)
{
	/*
	 * zero implies always (100%)
	 */
	if (frequency == 0)
		return (B_TRUE);

	/*
	 * Note: we still handle legacy (unscaled) frequency values
	 */
	uint32_t maximum = (frequency <= 100) ? 100 : ZI_PERCENTAGE_MAX;

	return (spa_get_random(maximum) < frequency);
}
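
/*
 * For example, a zi_freq of ZI_PERCENTAGE_MAX always triggers, a zi_freq
 * of ZI_PERCENTAGE_MAX / 4 triggers on roughly 25% of calls, and a legacy
 * value of 25 (<= 100) is evaluated against the unscaled 0-100 range and
 * likewise triggers about 25% of the time.
 */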

/*
 * Returns true if the given record matches the I/O in progress.
 */
static boolean_t
zio_match_handler(const zbookmark_phys_t *zb, uint64_t type, int dva,
    zinject_record_t *record, int error)
{
	/*
	 * Check for a match against the MOS, which is based on type
	 */
	if (zb->zb_objset == DMU_META_OBJSET &&
	    record->zi_objset == DMU_META_OBJSET &&
	    record->zi_object == DMU_META_DNODE_OBJECT) {
		if (record->zi_type == DMU_OT_NONE ||
		    type == record->zi_type)
			return (freq_triggered(record->zi_freq));
		else
			return (B_FALSE);
	}

	/*
	 * Check for an exact match.
	 */
	if (zb->zb_objset == record->zi_objset &&
	    zb->zb_object == record->zi_object &&
	    zb->zb_level == record->zi_level &&
	    zb->zb_blkid >= record->zi_start &&
	    zb->zb_blkid <= record->zi_end &&
	    (record->zi_dvas == 0 || (record->zi_dvas & (1ULL << dva))) &&
	    error == record->zi_error) {
		return (freq_triggered(record->zi_freq));
	}

	return (B_FALSE);
}
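
/*
 * For example, a zi_dvas bitmask of 0x5 (binary 101) restricts the exact
 * match above to I/Os for DVA 0 or DVA 2 of the block, while zi_dvas == 0
 * skips the DVA check entirely and matches any copy.
 */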

/*
 * Panic the system when a config change happens in the function
 * specified by tag.
 */
void
zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa)
			continue;

		if (handler->zi_record.zi_type == type &&
		    strcmp(tag, handler->zi_record.zi_func) == 0)
			panic("Panic requested in function %s\n", tag);
	}

	rw_exit(&inject_lock);
}

/*
 * Inject a decryption failure. Decryption failures can occur in
 * both the ARC and the ZIO layers.
 */
int
zio_handle_decrypt_injection(spa_t *spa, const zbookmark_phys_t *zb,
    uint64_t type, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DECRYPT_FAULT)
			continue;

		if (zio_match_handler(zb, type, ZI_NO_DVA,
		    &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);
	return (ret);
}

/*
 * If this is a physical I/O for a vdev child determine which DVA it is
 * for. We iterate backwards through the DVAs matching on the offset so
 * that we end up with ZI_NO_DVA (-1) if we don't find a match.
 */
static int
zio_match_dva(zio_t *zio)
{
	int i = ZI_NO_DVA;

	if (zio->io_bp != NULL && zio->io_vd != NULL &&
	    zio->io_child_type == ZIO_CHILD_VDEV) {
		for (i = BP_GET_NDVAS(zio->io_bp) - 1; i >= 0; i--) {
			dva_t *dva = &zio->io_bp->blk_dva[i];
			uint64_t off = DVA_GET_OFFSET(dva);
			vdev_t *vd = vdev_lookup_top(zio->io_spa,
			    DVA_GET_VDEV(dva));

			/* Compensate for vdev label added to leaves */
			if (zio->io_vd->vdev_ops->vdev_op_leaf)
				off += VDEV_LABEL_START_SIZE;

			if (zio->io_vd == vd && zio->io_offset == off)
				break;
		}
	}

	return (i);
}

/*
 * Determine if the I/O in question should return failure.  Returns the errno
 * to be returned to the caller.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	/*
	 * Ignore I/O not associated with any logical data.
	 */
	if (zio->io_logical == NULL)
		return (0);

	/*
	 * Currently, we only support fault injection on reads.
	 */
	if (zio->io_type != ZIO_TYPE_READ)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
			continue;

		/* If this handler matches, return the specified error */
		if (zio_match_handler(&zio->io_logical->io_bookmark,
		    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
		    zio_match_dva(zio), &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

/*
 * Determine if the zio is part of a label update and has an injection
 * handler associated with that portion of the label. Currently, we
 * allow error injection in either the nvlist or the uberblock region of
 * the vdev label.
 */
int
zio_handle_label_injection(zio_t *zio, int error)
{
	inject_handler_t *handler;
	vdev_t *vd = zio->io_vd;
	uint64_t offset = zio->io_offset;
	int label;
	int ret = 0;

	if (offset >= VDEV_LABEL_START_SIZE &&
	    offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		uint64_t start = handler->zi_record.zi_start;
		uint64_t end = handler->zi_record.zi_end;

		if (handler->zi_record.zi_cmd != ZINJECT_LABEL_FAULT)
			continue;

		/*
		 * The injection region is the relative offsets within a
		 * vdev label. We must determine the label which is being
		 * updated and adjust our region accordingly.
		 */
		label = vdev_label_number(vd->vdev_psize, offset);
		start = vdev_label_offset(vd->vdev_psize, label, start);
		end = vdev_label_offset(vd->vdev_psize, label, end);

		if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
		    (offset >= start && offset <= end)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);
	return (ret);
}
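
/*
 * For example, an injection range covering the uberblock area is given as
 * offsets relative to a single label. Because each of the four labels
 * (two at the front of the device, two at the end) is updated by its own
 * zio, vdev_label_number() identifies which label this zio targets and
 * vdev_label_offset() rebases the relative range onto that label's
 * absolute device offsets before the comparison above.
 */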

static int
zio_inject_bitflip_cb(void *data, size_t len, void *private)
{
	ASSERTV(zio_t *zio = private);
	uint8_t *buffer = data;
	uint_t byte = spa_get_random(len);

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/* flip a single random bit in an abd data buffer */
	buffer[byte] ^= 1 << spa_get_random(8);

	return (1);	/* stop after first flip */
}

static int
zio_handle_device_injection_impl(vdev_t *vd, zio_t *zio, int err1, int err2)
{
	inject_handler_t *handler;
	int ret = 0;

	/*
	 * We skip over faults in the labels unless it's during
	 * device open (i.e. zio == NULL).
	 */
	if (zio != NULL) {
		uint64_t offset = zio->io_offset;

		if (offset < VDEV_LABEL_START_SIZE ||
		    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
			return (0);
	}

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (handler->zi_record.zi_cmd != ZINJECT_DEVICE_FAULT)
			continue;

		if (vd->vdev_guid == handler->zi_record.zi_guid) {
			if (handler->zi_record.zi_failfast &&
			    (zio == NULL || (zio->io_flags &
			    (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) {
				continue;
			}

			/* Handle type specific I/O failures */
			if (zio != NULL &&
			    handler->zi_record.zi_iotype != ZIO_TYPES &&
			    handler->zi_record.zi_iotype != zio->io_type)
				continue;

			if (handler->zi_record.zi_error == err1 ||
			    handler->zi_record.zi_error == err2) {
				/*
				 * limit error injection if requested
				 */
				if (!freq_triggered(handler->zi_record.zi_freq))
					continue;

				/*
				 * For a failed open, pretend like the device
				 * has gone away.
				 */
				if (err1 == ENXIO)
					vd->vdev_stat.vs_aux =
					    VDEV_AUX_OPEN_FAILED;

				/*
				 * Treat these errors as if they had been
				 * retried so that all the appropriate stats
				 * and FMA events are generated.
				 */
				if (!handler->zi_record.zi_failfast &&
				    zio != NULL)
					zio->io_flags |= ZIO_FLAG_IO_RETRY;

				/*
				 * EILSEQ means flip a bit after a read
				 */
				if (handler->zi_record.zi_error == EILSEQ) {
					ASSERT(zio != NULL);

					/* locate buffer data and flip a bit */
					(void) abd_iterate_func(zio->io_abd, 0,
					    zio->io_size, zio_inject_bitflip_cb,
					    zio);
					break;
				}

				ret = handler->zi_record.zi_error;
				break;
			}

			if (handler->zi_record.zi_error == ENXIO) {
				ret = SET_ERROR(EIO);
				break;
			}
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

int
zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
{
	return (zio_handle_device_injection_impl(vd, zio, error, INT_MAX));
}

int
zio_handle_device_injections(vdev_t *vd, zio_t *zio, int err1, int err2)
{
	return (zio_handle_device_injection_impl(vd, zio, err1, err2));
}
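
/*
 * zio_handle_device_injections() lets a caller accept either of two
 * injected errnos in a single pass over the handler list;
 * zio_handle_device_injection() is the single-errno form, passing INT_MAX
 * as a sentinel second errno that no handler is expected to use.
 */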

/*
 * Simulate hardware that ignores cache flushes.  For the requested number
 * of seconds, nix the actual writing to disk.
 */
void
zio_handle_ignored_writes(zio_t *zio)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		/*
		 * Positive duration implies # of seconds, negative
		 * a number of txgs
		 */
		if (handler->zi_record.zi_timer == 0) {
			if (handler->zi_record.zi_duration > 0)
				handler->zi_record.zi_timer = ddi_get_lbolt64();
			else
				handler->zi_record.zi_timer = zio->io_txg;
		}

		/* Have a "problem" writing 60% of the time */
		if (spa_get_random(100) < 60)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		break;
	}

	rw_exit(&inject_lock);
}
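
/*
 * For example, a zi_duration of 30 makes the pool "lose" writes for 30
 * seconds of wall-clock time (zi_timer records the starting lbolt),
 * while a zi_duration of -5 does so for 5 txgs (zi_timer records the
 * starting txg); spa_handle_ignored_writes() below verifies the handler
 * is cleared before its window expires.
 */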

void
spa_handle_ignored_writes(spa_t *spa)
{
	inject_handler_t *handler;

	if (zio_injection_enabled == 0)
		return;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		if (handler->zi_record.zi_duration > 0) {
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    ddi_time_after64(
			    (int64_t)handler->zi_record.zi_timer +
			    handler->zi_record.zi_duration * hz,
			    ddi_get_lbolt64()));
		} else {
			/* duration is negative so the subtraction here adds */
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer -
			    handler->zi_record.zi_duration >=
			    spa_syncing_txg(spa));
		}
	}

	rw_exit(&inject_lock);
}

hrtime_t
zio_handle_io_delay(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	inject_handler_t *min_handler = NULL;
	hrtime_t min_target = 0;

	rw_enter(&inject_lock, RW_READER);

	/*
	 * inject_delay_count is a subset of zio_injection_enabled that
	 * is only incremented for delay handlers. These checks are
	 * mainly added to remind the reader why we're not explicitly
	 * checking zio_injection_enabled like the other functions.
	 */
	IMPLY(inject_delay_count > 0, zio_injection_enabled > 0);
	IMPLY(zio_injection_enabled == 0, inject_delay_count == 0);

	/*
	 * If there aren't any inject delay handlers registered, then we
	 * can short circuit and simply return 0 here. A value of zero
	 * informs zio_delay_interrupt() that this request should not be
	 * delayed. This short circuit keeps us from acquiring the
	 * inject_delay_mtx unnecessarily.
	 */
	if (inject_delay_count == 0) {
		rw_exit(&inject_lock);
		return (0);
	}

	/*
	 * Each inject handler has a number of "lanes" associated with
	 * it. Each lane is able to handle requests independently of one
	 * another, and at a latency defined by the inject handler
	 * record's zi_timer field. Thus if a handler is configured with
	 * a single lane with a 10ms latency, it will delay requests
	 * such that only a single request is completed every 10ms. So,
	 * if more than one request is attempted per each 10ms interval,
	 * the average latency of the requests will be greater than
	 * 10ms; but if only a single request is submitted each 10ms
	 * interval the average latency will be 10ms.
	 *
	 * We need to acquire this mutex to prevent multiple concurrent
	 * threads being assigned to the same lane of a given inject
	 * handler. The mutex allows us to perform the following two
	 * operations atomically:
	 *
	 *	1. determine the minimum handler and minimum target
	 *	   value of all the possible handlers
	 *	2. update that minimum handler's lane array
	 *
	 * Without atomicity, two (or more) threads could pick the same
	 * lane in step (1), and then conflict with each other in step
	 * (2). This could allow a single lane handler to process
	 * multiple requests simultaneously, which shouldn't be possible.
	 */
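
	/*
	 * For example, with zi_nlanes = 2 and zi_timer = 10ms, two I/Os
	 * arriving at time T may each claim an idle lane and target
	 * T + 10ms. A third I/O arriving immediately after must reuse a
	 * lane, so its target becomes that lane's busy time plus
	 * zi_timer, i.e. T + 20ms; in the terms used below this is
	 * target = MAX(idle, busy).
	 */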
	mutex_enter(&inject_delay_mtx);

	for (inject_handler_t *handler = list_head(&inject_handlers);
	    handler != NULL; handler = list_next(&inject_handlers, handler)) {
		if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
			continue;

		if (!freq_triggered(handler->zi_record.zi_freq))
			continue;

		if (vd->vdev_guid != handler->zi_record.zi_guid)
			continue;

		/*
		 * Defensive; should never happen as the array allocation
		 * occurs prior to inserting this handler on the list.
		 */
		ASSERT3P(handler->zi_lanes, !=, NULL);

		/*
		 * This should never happen, the zinject command should
		 * prevent a user from setting an IO delay with zero lanes.
		 */
		ASSERT3U(handler->zi_record.zi_nlanes, !=, 0);

		ASSERT3U(handler->zi_record.zi_nlanes, >,
		    handler->zi_next_lane);

		/*
		 * We want to issue this IO to the lane that will become
		 * idle the soonest, so we compare the soonest this
		 * specific handler can complete the IO with all other
		 * handlers, to find the lowest value of all possible
		 * lanes. We then use this lane to submit the request.
		 *
		 * Since each handler has a constant value for its
		 * delay, we can just use the "next" lane for that
		 * handler; as it will always be the lane with the
		 * lowest value for that particular handler (i.e. the
		 * lane that will become idle the soonest). This saves a
		 * scan of each handler's lanes array.
		 *
		 * There are two cases to consider when determining when
		 * this specific IO request should complete. If this
		 * lane is idle, we want to "submit" the request now so
		 * it will complete after zi_timer milliseconds. Thus,
		 * we set the target to now + zi_timer.
		 *
		 * If the lane is busy, we want this request to complete
		 * zi_timer milliseconds after the lane becomes idle.
		 * Since the 'zi_lanes' array holds the time at which
		 * each lane will become idle, we use that value to
		 * determine when this request should complete.
		 */
		hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
		hrtime_t busy = handler->zi_record.zi_timer +
		    handler->zi_lanes[handler->zi_next_lane];
		hrtime_t target = MAX(idle, busy);

		if (min_handler == NULL) {
			min_handler = handler;
			min_target = target;
			continue;
		}

		ASSERT3P(min_handler, !=, NULL);
		ASSERT3U(min_target, !=, 0);

		/*
		 * We don't yet increment the "next lane" variable since
		 * we still might find a lower value lane in another
		 * handler during any remaining iterations. Once we're
		 * sure we've selected the absolute minimum, we'll claim
		 * the lane and increment the handler's "next lane"
		 * field below.
		 */

		if (target < min_target) {
			min_handler = handler;
			min_target = target;
		}
	}

	/*
	 * 'min_handler' will be NULL if no IO delays are registered for
	 * this vdev, otherwise it will point to the handler containing
	 * the lane that will become idle the soonest.
	 */
	if (min_handler != NULL) {
		ASSERT3U(min_target, !=, 0);
		min_handler->zi_lanes[min_handler->zi_next_lane] = min_target;

		/*
		 * If we've used all possible lanes for this handler,
		 * loop back and start using the first lane again;
		 * otherwise, just increment the lane index.
		 */
		min_handler->zi_next_lane = (min_handler->zi_next_lane + 1) %
		    min_handler->zi_record.zi_nlanes;
	}

	mutex_exit(&inject_delay_mtx);
	rw_exit(&inject_lock);

	return (min_target);
}

static int
zio_calculate_range(const char *pool, zinject_record_t *record)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	objset_t *os = NULL;
	dnode_t *dn = NULL;
	int error;

	/*
	 * Obtain the dnode for object using pool, objset, and object
	 */
	error = dsl_pool_hold(pool, FTAG, &dp);
	if (error)
		return (error);

	error = dsl_dataset_hold_obj(dp, record->zi_objset, FTAG, &ds);
	dsl_pool_rele(dp, FTAG);
	if (error)
		return (error);

	error = dmu_objset_from_ds(ds, &os);
	dsl_dataset_rele(ds, FTAG);
	if (error)
		return (error);

	error = dnode_hold(os, record->zi_object, FTAG, &dn);
	if (error)
		return (error);

	/*
	 * Translate the range into block IDs.
	 */
	if (record->zi_start != 0 || record->zi_end != -1ULL) {
		record->zi_start >>= dn->dn_datablkshift;
		record->zi_end >>= dn->dn_datablkshift;
	}
	if (record->zi_level > 0) {
		if (record->zi_level >= dn->dn_nlevels) {
			dnode_rele(dn, FTAG);
			return (SET_ERROR(EDOM));
		}

		if (record->zi_start != 0 || record->zi_end != 0) {
			int shift = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			for (int level = record->zi_level; level > 0; level--) {
				record->zi_start >>= shift;
				record->zi_end >>= shift;
			}
		}
	}

	dnode_rele(dn, FTAG);
	return (0);
}
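
/*
 * For example (illustrative numbers): with 128K data blocks
 * (dn_datablkshift == 17) a byte range of [0, 1M) maps to level-0 blkids
 * [0, 7]; for zi_level == 1 with 128K indirect blocks and 128-byte block
 * pointers (dn_indblkshift == 17, SPA_BLKPTRSHIFT == 7), each level
 * shifts the blkids right by a further 10 bits (1024 pointers per
 * indirect block).
 */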

/*
 * Create a new handler for the given record.  We add it to the list, adding
 * a reference to the spa_t in the process.  We increment zio_injection_enabled,
 * which is the switch to trigger all fault injection.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
	inject_handler_t *handler;
	int error;
	spa_t *spa;

	/*
	 * If this is pool-wide metadata, make sure we unload the corresponding
	 * spa_t, so that the next attempt to load it will trigger the fault.
	 * We call spa_reset() to unload the pool appropriately.
	 */
	if (flags & ZINJECT_UNLOAD_SPA)
		if ((error = spa_reset(name)) != 0)
			return (error);

	if (record->zi_cmd == ZINJECT_DELAY_IO) {
		/*
		 * A value of zero for the number of lanes or for the
		 * delay time doesn't make sense.
		 */
		if (record->zi_timer == 0 || record->zi_nlanes == 0)
			return (SET_ERROR(EINVAL));

		/*
		 * The number of lanes is directly mapped to the size of
		 * an array used by the handler. Thus, to ensure the
		 * user doesn't trigger an allocation that's "too large"
		 * we cap the number of lanes here.
		 */
		if (record->zi_nlanes >= UINT16_MAX)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * If the supplied range was in bytes -- calculate the actual blkid.
	 */
	if (flags & ZINJECT_CALC_RANGE) {
		error = zio_calculate_range(name, record);
		if (error != 0)
			return (error);
	}

	if (!(flags & ZINJECT_NULL)) {
		/*
		 * spa_inject_addref() will add an injection reference, which
		 * will prevent the pool from being removed from the namespace
		 * while still allowing it to be unloaded.
		 */
		if ((spa = spa_inject_addref(name)) == NULL)
			return (SET_ERROR(ENOENT));

		handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);

		handler->zi_spa = spa;
		handler->zi_record = *record;

		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			handler->zi_lanes = kmem_zalloc(
			    sizeof (*handler->zi_lanes) *
			    handler->zi_record.zi_nlanes, KM_SLEEP);
			handler->zi_next_lane = 0;
		} else {
			handler->zi_lanes = NULL;
			handler->zi_next_lane = 0;
		}

		rw_enter(&inject_lock, RW_WRITER);

		/*
		 * We can't move this increment into the conditional
		 * above because we need to hold the RW_WRITER lock of
		 * inject_lock, and we don't want to hold that while
		 * allocating the handler's zi_lanes array.
		 */
		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			ASSERT3S(inject_delay_count, >=, 0);
			inject_delay_count++;
			ASSERT3S(inject_delay_count, >, 0);
		}

		*id = handler->zi_id = inject_next_id++;
		list_insert_tail(&inject_handlers, handler);
		atomic_inc_32(&zio_injection_enabled);

		rw_exit(&inject_lock);
	}

	/*
	 * Flush the ARC, so that any attempts to read this data will end up
	 * going to the ZIO layer.  Note that this is a little overkill, but
	 * we don't have the necessary ARC interfaces to do anything else, and
	 * fault injection isn't a performance critical path.
	 */
	if (flags & ZINJECT_FLUSH_ARC)
		/*
		 * We must use FALSE to ensure arc_flush returns, since
		 * we're not preventing concurrent ARC insertions.
		 */
		arc_flush(NULL, FALSE);

	return (0);
}
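
/*
 * Typical flow (a sketch of the usual zinject(8) path): the userland
 * zinject command fills in a zinject_record_t and issues the
 * ZFS_IOC_INJECT_FAULT ioctl, which lands here; the identifier returned
 * through *id is what a later "zinject -c <id>" hands to
 * zio_clear_fault() below.
 */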

/*
 * Returns the next record with an ID greater than that supplied to the
 * function.  Used to iterate over all handlers in the system.
 */
int
zio_inject_list_next(int *id, char *name, size_t buflen,
    zinject_record_t *record)
{
	inject_handler_t *handler;
	int ret;

	mutex_enter(&spa_namespace_lock);
	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id > *id)
			break;

	if (handler) {
		*record = handler->zi_record;
		*id = handler->zi_id;
		(void) strncpy(name, spa_name(handler->zi_spa), buflen);
		ret = 0;
	} else {
		ret = SET_ERROR(ENOENT);
	}

	rw_exit(&inject_lock);
	mutex_exit(&spa_namespace_lock);

	return (ret);
}
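
/*
 * Callers iterate by starting with *id == 0 and invoking this function
 * repeatedly; each call returns the handler with the next-higher ID and
 * advances *id, until ENOENT signals the end of the list.
 */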

/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_WRITER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id == id)
			break;

	if (handler == NULL) {
		rw_exit(&inject_lock);
		return (SET_ERROR(ENOENT));
	}

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3S(inject_delay_count, >, 0);
		inject_delay_count--;
		ASSERT3S(inject_delay_count, >=, 0);
	}

	list_remove(&inject_handlers, handler);
	rw_exit(&inject_lock);

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3P(handler->zi_lanes, !=, NULL);
		kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
		    handler->zi_record.zi_nlanes);
	} else {
		ASSERT3P(handler->zi_lanes, ==, NULL);
	}

	spa_inject_delref(handler->zi_spa);
	kmem_free(handler, sizeof (inject_handler_t));
	atomic_dec_32(&zio_injection_enabled);

	return (0);
}

void
zio_inject_init(void)
{
	rw_init(&inject_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&inject_delay_mtx, NULL, MUTEX_DEFAULT, NULL);
	list_create(&inject_handlers, sizeof (inject_handler_t),
	    offsetof(inject_handler_t, zi_link));
}

void
zio_inject_fini(void)
{
	list_destroy(&inject_handlers);
	mutex_destroy(&inject_delay_mtx);
	rw_destroy(&inject_lock);
}

#if defined(_KERNEL)
EXPORT_SYMBOL(zio_injection_enabled);
EXPORT_SYMBOL(zio_inject_fault);
EXPORT_SYMBOL(zio_inject_list_next);
EXPORT_SYMBOL(zio_clear_fault);
EXPORT_SYMBOL(zio_handle_fault_injection);
EXPORT_SYMBOL(zio_handle_device_injection);
EXPORT_SYMBOL(zio_handle_label_injection);
#endif