/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>

#include <sys/fm/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/sysevent.h>
/*
 * This general routine is responsible for generating all the different ZFS
 * ereports.  The payload is dependent on the class, and which arguments are
 * supplied to the function:
 *
 *	EREPORT			POOL	VDEV	IO
 *	block			X	X	X
 *	data			X		X
 *	device			X	X
 *	pool			X
 *
 * If we are in a loading state, all errors are chained together by the same
 * SPA-wide ENA (Error Numeric Association).
 *
 * For isolated I/O requests, we get the ENA from the zio_t.  The propagation
 * gets very complicated due to RAID-Z, gang blocks, and vdev caching.  We want
 * to chain together all ereports associated with a logical piece of data.  For
 * read I/Os, there are basically three 'types' of I/O, which form a roughly
 * layered diagram:
 *
 *	+---------------+
 *	| Aggregate I/O |	No associated logical data or device
 *	+---------------+
 *		|
 *		V
 *	+---------------+	Reads associated with a piece of logical data.
 *	|   Read I/O	|	This includes reads on behalf of RAID-Z,
 *	+---------------+	mirrors, gang blocks, retries, etc.
 *		|
 *		V
 *	+---------------+	Reads associated with a particular device, but
 *	| Physical I/O	|	no logical data.  Issued as part of vdev caching
 *	+---------------+	and I/O aggregation.
 *
 * Note that 'physical I/O' here is not the same terminology as used in the rest
 * of ZIO.  Typically, 'physical I/O' simply means that there is no attached
 * block pointer.  But I/O with no associated block pointer can still be related
 * to a logical piece of data (i.e. RAID-Z requests).
 *
 * Purely physical I/Os always have unique ENAs.  They are not related to a
 * particular piece of logical data, and therefore cannot be chained together.
 * We still generate an ereport, but the DE doesn't correlate it with any
 * logical piece of data.  When such an I/O fails, the delegated I/O requests
 * will issue a retry, which will trigger the 'real' ereport with the correct
 * ENA.
 *
 * We keep track of the ENA for a ZIO chain through the 'io_logical' member.
 * When a new logical I/O is issued, we set this to point to itself.  Child I/Os
 * then inherit this pointer, so that when it is first set subsequent failures
 * will use the same ENA.  For vdev cache fill and queue aggregation I/O,
 * this pointer is set to NULL, and no ereport will be generated (since it
 * doesn't actually correspond to any particular device or piece of data,
 * and the caller will always retry without caching or queueing anyway).
 *
 * For checksum errors, we want to include more information about the actual
 * error which occurs.  Accordingly, we build an ereport when the error is
 * noticed, but instead of sending it in immediately, we hang it off of the
 * io_cksum_report field of the logical IO.  When the logical IO completes
 * (successfully or not), zfs_ereport_finish_checksum() is called with the
 * good and bad versions of the buffer (if available), and we annotate the
 * ereport with information about the differences.
 */
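
/*
 * As a rough usage sketch (illustrative only; the real callers live in the
 * zio pipeline), the deferred checksum flow described above looks like:
 *
 *	zio_bad_cksum_t zbc;		// filled in by the checksum code
 *
 *	zfs_ereport_start_checksum(spa, vd, zio, zio->io_offset,
 *	    zio->io_size, NULL, &zbc);	// build the ereport and hang it
 *					// off zio->io_logical
 *	...				// the logical I/O completes
 *	zfs_ereport_finish_checksum(report, good_buf, bad_buf, B_TRUE);
 *
 * where 'report' is the zio_cksum_report_t chained on io_cksum_report, and
 * good_buf/bad_buf (placeholder names) are the reconstructed and corrupt
 * copies of the data, if available.
 */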
#ifdef _KERNEL
static void
zfs_zevent_post_cb(nvlist_t *nvl, nvlist_t *detector)
{
	if (nvl)
		fm_nvlist_destroy(nvl, FM_NVA_FREE);

	if (detector)
		fm_nvlist_destroy(detector, FM_NVA_FREE);
}
static void
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
    const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
	nvlist_t *ereport, *detector;

	uint64_t ena;
	char class[64];
	/*
	 * If we are doing a spa_tryimport() or in recovery mode,
	 * ignore errors.
	 */
	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT ||
	    spa_load_state(spa) == SPA_LOAD_RECOVER)
		return;
	/*
	 * If we are in the middle of opening a pool, and the previous attempt
	 * failed, don't bother logging any new ereports - we're just going to
	 * get the same diagnosis anyway.
	 */
	if (spa_load_state(spa) != SPA_LOAD_NONE &&
	    spa->spa_last_open_failed)
		return;
	if (zio != NULL) {
		/*
		 * If this is not a read or write zio, ignore the error.  This
		 * can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
		 */
		if (zio->io_type != ZIO_TYPE_READ &&
		    zio->io_type != ZIO_TYPE_WRITE)
			return;

		if (vd != NULL) {
			/*
			 * If the vdev has already been marked as failing due
			 * to a failed probe, then ignore any subsequent I/O
			 * errors, as the DE will automatically fault the vdev
			 * on the first such failure.  This also catches cases
			 * where vdev_remove_wanted is set and the device has
			 * not yet been asynchronously placed into the REMOVED
			 * state.
			 */
			if (zio->io_vd == vd && !vdev_accessible(vd, zio))
				return;

			/*
			 * Ignore checksum errors for reads from DTL regions of
			 * leaf vdevs.
			 */
			if (zio->io_type == ZIO_TYPE_READ &&
			    zio->io_error == ECKSUM &&
			    vd->vdev_ops->vdev_op_leaf &&
			    vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
				return;
		}
	}
	/*
	 * For probe failure, we want to avoid posting ereports if we've
	 * already removed the device in the meantime.
	 */
	if (vd != NULL &&
	    strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) == 0 &&
	    (vd->vdev_remove_wanted || vd->vdev_state == VDEV_STATE_REMOVED))
		return;
	if ((ereport = fm_nvlist_create(NULL)) == NULL)
		return;

	if ((detector = fm_nvlist_create(NULL)) == NULL) {
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		return;
	}
	/*
	 * Serialize ereport generation
	 */
	mutex_enter(&spa->spa_errlist_lock);
	/*
	 * Determine the ENA to use for this event.  If we are in a loading
	 * state, use a SPA-wide ENA.  Otherwise, if we are in an I/O state, use
	 * a root zio-wide ENA.  Otherwise, simply use a unique ENA.
	 */
	if (spa_load_state(spa) != SPA_LOAD_NONE) {
		if (spa->spa_ena == 0)
			spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
		ena = spa->spa_ena;
	} else if (zio != NULL && zio->io_logical != NULL) {
		if (zio->io_logical->io_ena == 0)
			zio->io_logical->io_ena =
			    fm_ena_generate(0, FM_ENA_FMT1);
		ena = zio->io_logical->io_ena;
	} else {
		ena = fm_ena_generate(0, FM_ENA_FMT1);
	}
	/*
	 * Construct the full class, detector, and other standard FMA fields.
	 */
	(void) snprintf(class, sizeof (class), "%s.%s",
	    ZFS_ERROR_CLASS, subclass);

	fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa),
	    vd != NULL ? vd->vdev_guid : 0);

	fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL);
	/*
	 * Construct the per-ereport payload, depending on which parameters are
	 * passed in.
	 */

	/*
	 * Generic payload members common to all ereports.
	 */
	fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL,
	    DATA_TYPE_STRING, spa_name(spa), FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
	    DATA_TYPE_UINT64, spa_guid(spa),
	    FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32,
	    spa_load_state(spa), NULL);

	if (spa != NULL) {
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
		    DATA_TYPE_STRING,
		    spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
		    FM_EREPORT_FAILMODE_WAIT :
		    spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
		    FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC,
		    NULL);
	}
	if (vd != NULL) {
		vdev_t *pvd = vd->vdev_parent;
		vdev_queue_t *vq = &vd->vdev_queue;

		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
		    DATA_TYPE_UINT64, vd->vdev_guid,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
		    DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL);
		if (vd->vdev_path != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
			    DATA_TYPE_STRING, vd->vdev_path, NULL);
		if (vd->vdev_devid != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID,
			    DATA_TYPE_STRING, vd->vdev_devid, NULL);
		if (vd->vdev_fru != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU,
			    DATA_TYPE_STRING, vd->vdev_fru, NULL);

		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_ASHIFT,
		    DATA_TYPE_UINT64, vd->vdev_ashift, NULL);

		if (vq != NULL) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_COMP_TS,
			    DATA_TYPE_UINT64, vq->vq_io_complete_ts, NULL);
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_DELTA_TS,
			    DATA_TYPE_UINT64, vq->vq_io_delta_ts, NULL);
		}

		if (pvd != NULL) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID,
			    DATA_TYPE_UINT64, pvd->vdev_guid,
			    FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
			    DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type,
			    NULL);
			if (pvd->vdev_path != NULL)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
				    DATA_TYPE_STRING, pvd->vdev_path, NULL);
			if (pvd->vdev_devid != NULL)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
				    DATA_TYPE_STRING, pvd->vdev_devid, NULL);
		}
	}
	if (zio != NULL) {
		/*
		 * Payload common to all I/Os.
		 */
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
		    DATA_TYPE_INT32, zio->io_error, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS,
		    DATA_TYPE_INT32, zio->io_flags, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE,
		    DATA_TYPE_UINT32, zio->io_stage, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE,
		    DATA_TYPE_UINT32, zio->io_pipeline, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY,
		    DATA_TYPE_UINT64, zio->io_delay, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_TIMESTAMP,
		    DATA_TYPE_UINT64, zio->io_timestamp, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DEADLINE,
		    DATA_TYPE_UINT64, zio->io_deadline, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELTA,
		    DATA_TYPE_UINT64, zio->io_delta, NULL);
		/*
		 * If the 'size' parameter is non-zero, it indicates this is a
		 * RAID-Z or other I/O where the physical offset and length are
		 * provided for us, instead of within the zio_t.
		 */
		if (vd != NULL) {
			if (size)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
				    DATA_TYPE_UINT64, stateoroffset,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
				    DATA_TYPE_UINT64, size, NULL);
			else
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
				    DATA_TYPE_UINT64, zio->io_offset,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
				    DATA_TYPE_UINT64, zio->io_size, NULL);
		}
		/*
		 * Payload for I/Os with corresponding logical information.
		 */
		if (zio->io_logical != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_objset,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_object,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
			    DATA_TYPE_INT64,
			    zio->io_logical->io_bookmark.zb_level,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_blkid, NULL);
	} else if (vd != NULL) {
		/*
		 * If we have a vdev but no zio, this is a device fault, and the
		 * 'stateoroffset' parameter indicates the previous state of the
		 * vdev.
		 */
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
		    DATA_TYPE_UINT64, stateoroffset, NULL);
	}
	mutex_exit(&spa->spa_errlist_lock);

	*ereport_out = ereport;
	*detector_out = detector;
}
/* if it's <= 128 bytes, save the corruption directly */
#define	ZFM_MAX_INLINE		(128 / sizeof (uint64_t))

#define	MAX_RANGES		16
typedef struct zfs_ecksum_info {
	/* histograms of set and cleared bits by bit number in a 64-bit word */
	uint16_t zei_histogram_set[sizeof (uint64_t) * NBBY];
	uint16_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];

	/* inline arrays of bits set and cleared. */
	uint64_t zei_bits_set[ZFM_MAX_INLINE];
	uint64_t zei_bits_cleared[ZFM_MAX_INLINE];

	/*
	 * for each range, the number of bits set and cleared.  The Hamming
	 * distance between the good and bad buffers is the sum of them all.
	 */
	uint32_t zei_range_sets[MAX_RANGES];
	uint32_t zei_range_clears[MAX_RANGES];

	struct zei_ranges {
		uint32_t zr_start;
		uint32_t zr_end;
	} zei_ranges[MAX_RANGES];

	size_t zei_range_count;
	uint32_t zei_mingap;
	uint32_t zei_allowed_mingap;

} zfs_ecksum_info_t;
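
/*
 * Illustrative example (not part of the implementation): for one 16-bit
 * slice, if good == 0xff00 and bad == 0x0ff0 then
 *
 *	set     = ~good & bad = 0x00f0	(bits set in bad, not in good)
 *	cleared = good & ~bad = 0xf000	(bits set in good, not in bad)
 *
 * Each word contributes popcount(set) + popcount(cleared) changed bits,
 * here 4 + 4 = 8, so summing zei_range_sets[] and zei_range_clears[]
 * yields the total Hamming distance between the two buffers.
 */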
static void
update_histogram(uint64_t value_arg, uint16_t *hist, uint32_t *count)
{
	int i;
	int bits = 0;
	uint64_t value = BE_64(value_arg);

	/* We store the bits in big-endian (largest-first) order */
	for (i = 0; i < 64; i++) {
		if (value & (1ull << i)) {
			hist[63 - i]++;
			++bits;
		}
	}

	/* update the count of bits changed */
	*count += bits;
}
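
/*
 * For example (illustrative): a word whose set/cleared mask has eight bits
 * set adds 8 to *count; because of the BE_64() normalization above, hist[0]
 * always corresponds to the most significant bit of the first byte of the
 * word as it appears in the buffer, independent of host endianness.
 */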
/*
 * We've now filled up the range array, and need to increase "mingap" and
 * shrink the range list accordingly.  zei_mingap is always the smallest
 * distance between array entries, so we set the new_allowed_gap to be
 * one greater than that.  We then go through the list, joining together
 * any ranges which are closer than the new_allowed_gap.
 *
 * By construction, there will be at least one.  We also update zei_mingap
 * to the new smallest gap, to prepare for our next invocation.
 */
static void
zei_shrink_ranges(zfs_ecksum_info_t *eip)
{
	uint32_t mingap = UINT32_MAX;
	uint32_t new_allowed_gap = eip->zei_mingap + 1;

	size_t idx, output;
	size_t max = eip->zei_range_count;

	struct zei_ranges *r = eip->zei_ranges;

	ASSERT3U(eip->zei_range_count, >, 0);
	ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);

	output = idx = 0;
	while (idx < max - 1) {
		uint32_t start = r[idx].zr_start;
		uint32_t end = r[idx].zr_end;

		while (idx < max - 1) {
			uint32_t nstart, nend, gap;

			idx++;
			nstart = r[idx].zr_start;
			nend = r[idx].zr_end;

			gap = nstart - end;
			if (gap < new_allowed_gap) {
				end = nend;
				continue;
			}
			if (gap < mingap)
				mingap = gap;
			break;
		}
		r[output].zr_start = start;
		r[output].zr_end = end;
		output++;
	}

	ASSERT3U(output, <, eip->zei_range_count);
	eip->zei_range_count = output;
	eip->zei_mingap = mingap;
	eip->zei_allowed_mingap = new_allowed_gap;
}
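
/*
 * Worked example (illustrative): given ranges [0,8) [10,18) [40,48) [50,58)
 * (in units of uint64_t words) and zei_mingap == 2, new_allowed_gap becomes
 * 3, so the pairs separated by a gap of 2 are joined, leaving [0,18) and
 * [40,58); zei_mingap then becomes 22, the smallest gap that survived.
 */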
static void
zei_add_range(zfs_ecksum_info_t *eip, int start, int end)
{
	struct zei_ranges *r = eip->zei_ranges;
	size_t count = eip->zei_range_count;

	if (count >= MAX_RANGES) {
		zei_shrink_ranges(eip);
		count = eip->zei_range_count;
	}
	if (count == 0) {
		eip->zei_mingap = UINT32_MAX;
		eip->zei_allowed_mingap = 1;
	} else {
		int gap = start - r[count - 1].zr_end;

		if (gap < eip->zei_allowed_mingap) {
			r[count - 1].zr_end = end;
			return;
		}
		if (gap < eip->zei_mingap)
			eip->zei_mingap = gap;
	}
	r[count].zr_start = start;
	r[count].zr_end = end;
	eip->zei_range_count++;
}
static size_t
zei_range_total_size(zfs_ecksum_info_t *eip)
{
	struct zei_ranges *r = eip->zei_ranges;
	size_t count = eip->zei_range_count;
	size_t result = 0;
	size_t idx;

	for (idx = 0; idx < count; idx++)
		result += (r[idx].zr_end - r[idx].zr_start);

	return (result);
}
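
/*
 * E.g. (illustrative): ranges [0,2) and [5,8) cover 2 + 3 = 5 words, so the
 * total size is 5; annotate_ecksum() only uses the inline bit arrays while
 * this total stays within ZFM_MAX_INLINE (16 words, i.e. 128 bytes).
 */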
static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
    const uint8_t *goodbuf, const uint8_t *badbuf, size_t size,
    boolean_t drop_if_identical)
{
	const uint64_t *good = (const uint64_t *)goodbuf;
	const uint64_t *bad = (const uint64_t *)badbuf;

	uint64_t allset = 0;
	uint64_t allcleared = 0;

	size_t nui64s = size / sizeof (uint64_t);

	size_t inline_size;
	int no_inline = 0;
	size_t idx;
	size_t range;

	size_t offset = 0;
	ssize_t start = -1;

	zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_PUSHPAGE);
	/* don't do any annotation for injected checksum errors */
	if (info != NULL && info->zbc_injected)
		return (eip);

	if (info != NULL && info->zbc_has_cksum) {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
		    DATA_TYPE_UINT64_ARRAY,
		    sizeof (info->zbc_expected) / sizeof (uint64_t),
		    (uint64_t *)&info->zbc_expected,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
		    DATA_TYPE_UINT64_ARRAY,
		    sizeof (info->zbc_actual) / sizeof (uint64_t),
		    (uint64_t *)&info->zbc_actual,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
		    DATA_TYPE_STRING,
		    info->zbc_checksum_name,
		    NULL);

		if (info->zbc_byteswapped) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
			    DATA_TYPE_BOOLEAN, 1,
			    NULL);
		}
	}

	if (badbuf == NULL || goodbuf == NULL)
		return (eip);

	ASSERT3U(nui64s, <=, UINT16_MAX);
	ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(size, <=, UINT32_MAX);
	/* build up the range list by comparing the two buffers. */
	for (idx = 0; idx < nui64s; idx++) {
		if (good[idx] == bad[idx]) {
			if (start == -1)
				continue;

			zei_add_range(eip, start, idx);
			start = -1;
		} else {
			if (start != -1)
				continue;

			start = idx;
		}
	}
	if (start != -1)
		zei_add_range(eip, start, idx);
	/* See if it will fit in our inline buffers */
	inline_size = zei_range_total_size(eip);
	if (inline_size > ZFM_MAX_INLINE)
		no_inline = 1;

	/*
	 * If there is no change and we want to drop if the buffers are
	 * identical, do so.
	 */
	if (inline_size == 0 && drop_if_identical) {
		kmem_free(eip, sizeof (*eip));
		return (NULL);
	}
	/*
	 * Now walk through the ranges, filling in the details of the
	 * differences.  Also convert our uint64_t-array offsets to byte
	 * offsets.
	 */
	for (range = 0; range < eip->zei_range_count; range++) {
		size_t start = eip->zei_ranges[range].zr_start;
		size_t end = eip->zei_ranges[range].zr_end;

		for (idx = start; idx < end; idx++) {
			uint64_t set, cleared;

			// bits set in bad, but not in good
			set = ((~good[idx]) & bad[idx]);
			// bits set in good, but not in bad
			cleared = (good[idx] & (~bad[idx]));

			allset |= set;
			allcleared |= cleared;

			if (!no_inline) {
				ASSERT3U(offset, <, inline_size);
				eip->zei_bits_set[offset] = set;
				eip->zei_bits_cleared[offset] = cleared;
				offset++;
			}

			update_histogram(set, eip->zei_histogram_set,
			    &eip->zei_range_sets[range]);
			update_histogram(cleared, eip->zei_histogram_cleared,
			    &eip->zei_range_clears[range]);
		}

		/* convert to byte offsets */
		eip->zei_ranges[range].zr_start *= sizeof (uint64_t);
		eip->zei_ranges[range].zr_end *= sizeof (uint64_t);
	}
	eip->zei_allowed_mingap *= sizeof (uint64_t);
	inline_size *= sizeof (uint64_t);
	/* fill in ereport */
	fm_payload_set(ereport,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
	    DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
	    (uint32_t *)eip->zei_ranges,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
	    DATA_TYPE_UINT32, eip->zei_allowed_mingap,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
	    DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
	    DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
	    NULL);

	if (!no_inline) {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
		    DATA_TYPE_UINT8_ARRAY,
		    inline_size, (uint8_t *)eip->zei_bits_set,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
		    DATA_TYPE_UINT8_ARRAY,
		    inline_size, (uint8_t *)eip->zei_bits_cleared,
		    NULL);
	} else {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
		    DATA_TYPE_UINT16_ARRAY,
		    NBBY * sizeof (uint64_t), eip->zei_histogram_set,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
		    DATA_TYPE_UINT16_ARRAY,
		    NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
		    NULL);
	}
	return (eip);
}
#endif
void
zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
#ifdef _KERNEL
	nvlist_t *ereport = NULL;
	nvlist_t *detector = NULL;

	zfs_ereport_start(&ereport, &detector,
	    subclass, spa, vd, zio, stateoroffset, size);

	if (ereport == NULL)
		return;

	/* Cleanup is handled by the callback function */
	zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
#endif
}
void
zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
    struct zio *zio, uint64_t offset, uint64_t length, void *arg,
    zio_bad_cksum_t *info)
{
	zio_cksum_report_t *report = kmem_zalloc(sizeof (*report), KM_PUSHPAGE);

	if (zio->io_vsd != NULL)
		zio->io_vsd_ops->vsd_cksum_report(zio, report, arg);
	else
		zio_vsd_default_cksum_report(zio, report, arg);

	/* copy the checksum failure information if it was provided */
	if (info != NULL) {
		report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_PUSHPAGE);
		bcopy(info, report->zcr_ckinfo, sizeof (*info));
	}

	report->zcr_align = 1ULL << vd->vdev_top->vdev_ashift;
	report->zcr_length = length;

#ifdef _KERNEL
	zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector,
	    FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);

	if (report->zcr_ereport == NULL) {
		report->zcr_free(report->zcr_cbdata, report->zcr_cbinfo);
		if (report->zcr_ckinfo != NULL) {
			kmem_free(report->zcr_ckinfo,
			    sizeof (*report->zcr_ckinfo));
		}
		kmem_free(report, sizeof (*report));
		return;
	}
#endif

	mutex_enter(&spa->spa_errlist_lock);
	report->zcr_next = zio->io_logical->io_cksum_report;
	zio->io_logical->io_cksum_report = report;
	mutex_exit(&spa->spa_errlist_lock);
}
void
zfs_ereport_finish_checksum(zio_cksum_report_t *report,
    const void *good_data, const void *bad_data, boolean_t drop_if_identical)
{
#ifdef _KERNEL
	zfs_ecksum_info_t *info = NULL;
	info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
	    good_data, bad_data, report->zcr_length, drop_if_identical);

	if (info != NULL)
		zfs_zevent_post(report->zcr_ereport,
		    report->zcr_detector, zfs_zevent_post_cb);

	report->zcr_ereport = report->zcr_detector = NULL;

	if (info != NULL)
		kmem_free(info, sizeof (*info));
#endif
}
void
zfs_ereport_free_checksum(zio_cksum_report_t *rpt)
{
#ifdef _KERNEL
	if (rpt->zcr_ereport != NULL) {
		fm_nvlist_destroy(rpt->zcr_ereport,
		    FM_NVA_FREE);
		fm_nvlist_destroy(rpt->zcr_detector,
		    FM_NVA_FREE);
	}
#endif
	rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo);

	if (rpt->zcr_ckinfo != NULL)
		kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo));

	kmem_free(rpt, sizeof (*rpt));
}
void
zfs_ereport_send_interim_checksum(zio_cksum_report_t *report)
{
#ifdef _KERNEL
	zfs_zevent_post(report->zcr_ereport, report->zcr_detector, NULL);
#endif
}
void
zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
    struct zio *zio, uint64_t offset, uint64_t length,
    const void *good_data, const void *bad_data, zio_bad_cksum_t *zbc)
{
#ifdef _KERNEL
	nvlist_t *ereport = NULL;
	nvlist_t *detector = NULL;
	zfs_ecksum_info_t *info;

	zfs_ereport_start(&ereport, &detector,
	    FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);

	if (ereport == NULL)
		return;

	info = annotate_ecksum(ereport, zbc, good_data, bad_data, length,
	    B_FALSE);

	if (info != NULL) {
		zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
		kmem_free(info, sizeof (*info));
	}
#endif
}
static void
zfs_post_common(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	nvlist_t *resource;
	char class[64];

	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
		return;

	if ((resource = fm_nvlist_create(NULL)) == NULL)
		return;

	(void) snprintf(class, sizeof (class), "%s.%s.%s", FM_RSRC_RESOURCE,
	    ZFS_ERROR_CLASS, name);
	VERIFY(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION) == 0);
	VERIFY(nvlist_add_string(resource, FM_CLASS, class) == 0);
	VERIFY(nvlist_add_uint64(resource,
	    FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)) == 0);
	if (vd) {
		VERIFY(nvlist_add_uint64(resource,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid) == 0);
		VERIFY(nvlist_add_uint64(resource,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, vd->vdev_state) == 0);
	}

	zfs_zevent_post(resource, NULL, zfs_zevent_post_cb);
#endif
}
/*
 * The 'resource.fs.zfs.removed' event is an internal signal that the given vdev
 * has been removed from the system.  This will cause the DE to ignore any
 * recent I/O errors, inferring that they are due to the asynchronous device
 * removal.
 */
void
zfs_post_remove(spa_t *spa, vdev_t *vd)
{
	zfs_post_common(spa, vd, FM_EREPORT_RESOURCE_REMOVED);
}
/*
 * The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool
 * has the 'autoreplace' property set, and therefore any broken vdevs will be
 * handled by higher level logic, and no vdev fault should be generated.
 */
void
zfs_post_autoreplace(spa_t *spa, vdev_t *vd)
{
	zfs_post_common(spa, vd, FM_EREPORT_RESOURCE_AUTOREPLACE);
}
/*
 * The 'resource.fs.zfs.statechange' event is an internal signal that the
 * given vdev has transitioned its state to DEGRADED or HEALTHY.  This will
 * cause the retire agent to repair any outstanding fault management cases
 * open because the device was not found (fault.fs.zfs.device).
 */
void
zfs_post_state_change(spa_t *spa, vdev_t *vd)
{
	zfs_post_common(spa, vd, FM_EREPORT_RESOURCE_STATECHANGE);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_ereport_post);
EXPORT_SYMBOL(zfs_ereport_post_checksum);
EXPORT_SYMBOL(zfs_post_remove);
EXPORT_SYMBOL(zfs_post_autoreplace);
EXPORT_SYMBOL(zfs_post_state_change);
#endif