/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>

#include <sys/fm/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/sysevent.h>

/*
 * This general routine is responsible for generating all the different ZFS
 * ereports.  The payload is dependent on the class, and which arguments are
 * supplied to the function:
 *
 *	EREPORT			POOL	VDEV	IO
 *	block			X	X	X
 *	data			X		X
 *	device			X	X
 *	pool			X
 *
 * If we are in a loading state, all errors are chained together by the same
 * SPA-wide ENA (Error Numeric Association).
 *
 * For isolated I/O requests, we get the ENA from the zio_t. The propagation
 * gets very complicated due to RAID-Z, gang blocks, and vdev caching.  We want
 * to chain together all ereports associated with a logical piece of data.  For
 * read I/Os, there are basically three 'types' of I/O, which form a roughly
 * layered diagram:
 *
 *	+---------------+
 *	| Aggregate I/O |	No associated logical data or device
 *	+---------------+
 *	        |
 *	        V
 *	+---------------+	Reads associated with a piece of logical data.
 *	|   Read I/O	|	This includes reads on behalf of RAID-Z,
 *	+---------------+	mirrors, gang blocks, retries, etc.
 *	        |
 *	        V
 *	+---------------+	Reads associated with a particular device, but
 *	| Physical I/O	|	no logical data.  Issued as part of vdev caching
 *	+---------------+	and I/O aggregation.
 *
 * Note that 'physical I/O' here is not the same terminology as used in the rest
 * of ZIO.  Typically, 'physical I/O' simply means that there is no attached
 * block pointer.  But I/O with no associated block pointer can still be related
 * to a logical piece of data (i.e. RAID-Z requests).
 *
 * Purely physical I/Os always have unique ENAs.  They are not related to a
 * particular piece of logical data, and therefore cannot be chained together.
 * We still generate an ereport, but the DE doesn't correlate it with any
 * logical piece of data.  When such an I/O fails, the delegated I/O requests
 * will issue a retry, which will trigger the 'real' ereport with the correct
 * ENA.
 *
 * We keep track of the ENA for a ZIO chain through the 'io_logical' member.
 * When a new logical I/O is issued, we set this to point to itself.  Child I/Os
 * then inherit this pointer, so that once it is first set, subsequent failures
 * will use the same ENA.  For vdev cache fill and queue aggregation I/O,
 * this pointer is set to NULL, and no ereport will be generated (since it
 * doesn't actually correspond to any particular device or piece of data,
 * and the caller will always retry without caching or queueing anyway).
 *
 * For checksum errors, we want to include more information about the actual
 * error which occurs.  Accordingly, we build an ereport when the error is
 * noticed, but instead of sending it in immediately, we hang it off of the
 * io_cksum_report field of the logical IO.  When the logical IO completes
 * (successfully or not), zfs_ereport_finish_checksum() is called with the
 * good and bad versions of the buffer (if available), and we annotate the
 * ereport with information about the differences.
 */
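
/*
 * A sketch of that checksum life cycle in terms of the functions defined
 * below (the call sequence is illustrative; the real callers live in the
 * ZIO pipeline):
 *
 *	zfs_ereport_start_checksum(spa, vd, zio, offset, length, arg, &zbc);
 *	... the logical I/O completes ...
 *	zfs_ereport_finish_checksum(report, good_data, bad_data, B_TRUE);
 *
 * If the good and bad buffers never become available, the pending report
 * is released with zfs_ereport_free_checksum() instead.
 */
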
#ifdef _KERNEL
static void
zfs_zevent_post_cb(nvlist_t *nvl, nvlist_t *detector)
{
	if (nvl)
		fm_nvlist_destroy(nvl, FM_NVA_FREE);

	if (detector)
		fm_nvlist_destroy(detector, FM_NVA_FREE);
}

static void
zfs_zevent_post_cb_noop(nvlist_t *nvl, nvlist_t *detector)
{
}

static void
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
    const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
	nvlist_t *ereport, *detector;
	uint64_t ena;
	char class[64];

	/*
	 * If we are doing a spa_tryimport() or in recovery mode,
	 * ignore errors.
	 */
	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT ||
	    spa_load_state(spa) == SPA_LOAD_RECOVER)
		return;

	/*
	 * If we are in the middle of opening a pool, and the previous attempt
	 * failed, don't bother logging any new ereports - we're just going to
	 * get the same diagnosis anyway.
	 */
	if (spa_load_state(spa) != SPA_LOAD_NONE &&
	    spa->spa_last_open_failed)
		return;

	if (zio != NULL) {
		/*
		 * If this is not a read or write zio, ignore the error.  This
		 * can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
		 */
		if (zio->io_type != ZIO_TYPE_READ &&
		    zio->io_type != ZIO_TYPE_WRITE)
			return;

		if (vd != NULL) {
			/*
			 * If the vdev has already been marked as failing due
			 * to a failed probe, then ignore any subsequent I/O
			 * errors, as the DE will automatically fault the vdev
			 * on the first such failure.  This also catches cases
			 * where vdev_remove_wanted is set and the device has
			 * not yet been asynchronously placed into the REMOVED
			 * state.
			 */
			if (zio->io_vd == vd && !vdev_accessible(vd, zio))
				return;

			/*
			 * Ignore checksum errors for reads from DTL regions of
			 * leaf vdevs.
			 */
			if (zio->io_type == ZIO_TYPE_READ &&
			    zio->io_error == ECKSUM &&
			    vd->vdev_ops->vdev_op_leaf &&
			    vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
				return;
		}
	}

	/*
	 * For probe failure, we want to avoid posting ereports if we've
	 * already removed the device in the meantime.
	 */
	if (vd != NULL &&
	    strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) == 0 &&
	    (vd->vdev_remove_wanted || vd->vdev_state == VDEV_STATE_REMOVED))
		return;

	if ((ereport = fm_nvlist_create(NULL)) == NULL)
		return;

	if ((detector = fm_nvlist_create(NULL)) == NULL) {
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		return;
	}

	/*
	 * Serialize ereport generation
	 */
	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Determine the ENA to use for this event.  If we are in a loading
	 * state, use a SPA-wide ENA.  Otherwise, if we are in an I/O state, use
	 * a root zio-wide ENA.  Otherwise, simply use a unique ENA.
	 */
	if (spa_load_state(spa) != SPA_LOAD_NONE) {
		if (spa->spa_ena == 0)
			spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
		ena = spa->spa_ena;
	} else if (zio != NULL && zio->io_logical != NULL) {
		if (zio->io_logical->io_ena == 0)
			zio->io_logical->io_ena =
			    fm_ena_generate(0, FM_ENA_FMT1);
		ena = zio->io_logical->io_ena;
	} else {
		ena = fm_ena_generate(0, FM_ENA_FMT1);
	}

	/*
	 * Construct the full class, detector, and other standard FMA fields.
	 */
	(void) snprintf(class, sizeof (class), "%s.%s",
	    ZFS_ERROR_CLASS, subclass);

	fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa),
	    vd != NULL ? vd->vdev_guid : 0);

	fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL);

	/*
	 * Construct the per-ereport payload, depending on which parameters are
	 * passed in.
	 */

	/*
	 * Generic payload members common to all ereports.
	 */
	fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL,
	    DATA_TYPE_STRING, spa_name(spa), FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
	    DATA_TYPE_UINT64, spa_guid(spa),
	    FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32,
	    spa_load_state(spa), NULL);

	if (spa != NULL) {
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
		    DATA_TYPE_STRING,
		    spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
		    FM_EREPORT_FAILMODE_WAIT :
		    spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
		    FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC,
		    NULL);
	}

	if (vd != NULL) {
		vdev_t *pvd = vd->vdev_parent;
		vdev_queue_t *vq = &vd->vdev_queue;
		vdev_stat_t *vs = &vd->vdev_stat;
		vdev_t *spare_vd;
		uint64_t *spare_guids;
		char **spare_paths;
		int i, spare_count;

		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
		    DATA_TYPE_UINT64, vd->vdev_guid,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
		    DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL);
		if (vd->vdev_path != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
			    DATA_TYPE_STRING, vd->vdev_path, NULL);
		if (vd->vdev_devid != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID,
			    DATA_TYPE_STRING, vd->vdev_devid, NULL);
		if (vd->vdev_fru != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU,
			    DATA_TYPE_STRING, vd->vdev_fru, NULL);

		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_ASHIFT,
		    DATA_TYPE_UINT64, vd->vdev_ashift, NULL);

		if (vq != NULL) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_COMP_TS,
			    DATA_TYPE_UINT64, vq->vq_io_complete_ts, NULL);
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_DELTA_TS,
			    DATA_TYPE_UINT64, vq->vq_io_delta_ts, NULL);
		}

		if (vs != NULL) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_READ_ERRORS,
			    DATA_TYPE_UINT64, vs->vs_read_errors,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_WRITE_ERRORS,
			    DATA_TYPE_UINT64, vs->vs_write_errors,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_ERRORS,
			    DATA_TYPE_UINT64, vs->vs_checksum_errors, NULL);
		}

		if (pvd != NULL) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID,
			    DATA_TYPE_UINT64, pvd->vdev_guid,
			    FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
			    DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type,
			    NULL);
			if (pvd->vdev_path)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
				    DATA_TYPE_STRING, pvd->vdev_path, NULL);
			if (pvd->vdev_devid)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
				    DATA_TYPE_STRING, pvd->vdev_devid, NULL);
		}

		spare_count = spa->spa_spares.sav_count;
		spare_paths = kmem_zalloc(sizeof (char *) * spare_count,
		    KM_PUSHPAGE);
		spare_guids = kmem_zalloc(sizeof (uint64_t) * spare_count,
		    KM_PUSHPAGE);

		for (i = 0; i < spare_count; i++) {
			spare_vd = spa->spa_spares.sav_vdevs[i];
			if (spare_vd) {
				spare_paths[i] = spare_vd->vdev_path;
				spare_guids[i] = spare_vd->vdev_guid;
			}
		}

		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_PATHS,
		    DATA_TYPE_STRING_ARRAY, spare_count, spare_paths,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_GUIDS,
		    DATA_TYPE_UINT64_ARRAY, spare_count, spare_guids, NULL);

		kmem_free(spare_guids, sizeof (uint64_t) * spare_count);
		kmem_free(spare_paths, sizeof (char *) * spare_count);
	}

	if (zio != NULL) {
		/*
		 * Payload common to all I/Os.
		 */
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
		    DATA_TYPE_INT32, zio->io_error, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS,
		    DATA_TYPE_INT32, zio->io_flags, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE,
		    DATA_TYPE_UINT32, zio->io_stage, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE,
		    DATA_TYPE_UINT32, zio->io_pipeline, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY,
		    DATA_TYPE_UINT64, zio->io_delay, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_TIMESTAMP,
		    DATA_TYPE_UINT64, zio->io_timestamp, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELTA,
		    DATA_TYPE_UINT64, zio->io_delta, NULL);

		/*
		 * If the 'size' parameter is non-zero, it indicates this is a
		 * RAID-Z or other I/O where the physical offset and length are
		 * provided for us, instead of within the zio_t.
		 */
		if (size)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
			    DATA_TYPE_UINT64, stateoroffset,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
			    DATA_TYPE_UINT64, size, NULL);
		else
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
			    DATA_TYPE_UINT64, zio->io_offset,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
			    DATA_TYPE_UINT64, zio->io_size, NULL);

		/*
		 * Payload for I/Os with corresponding logical information.
		 */
		if (zio->io_logical != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_objset,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_object,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
			    DATA_TYPE_INT64,
			    zio->io_logical->io_bookmark.zb_level,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_blkid, NULL);
	} else if (vd != NULL) {
		/*
		 * If we have a vdev but no zio, this is a device fault, and the
		 * 'stateoroffset' parameter indicates the previous state of the
		 * vdev.
		 */
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
		    DATA_TYPE_UINT64, stateoroffset, NULL);
	}

	mutex_exit(&spa->spa_errlist_lock);

	*ereport_out = ereport;
	*detector_out = detector;
}

/* if it's <= 128 bytes, save the corruption directly */
#define	ZFM_MAX_INLINE		(128 / sizeof (uint64_t))

#define	MAX_RANGES		16

typedef struct zfs_ecksum_info {
	/* histograms of set and cleared bits by bit number in a 64-bit word */
	uint16_t zei_histogram_set[sizeof (uint64_t) * NBBY];
	uint16_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];

	/* inline arrays of bits set and cleared. */
	uint64_t zei_bits_set[ZFM_MAX_INLINE];
	uint64_t zei_bits_cleared[ZFM_MAX_INLINE];

	/*
	 * for each range, the number of bits set and cleared.  The Hamming
	 * distance between the good and bad buffers is the sum of them all.
	 */
	uint32_t zei_range_sets[MAX_RANGES];
	uint32_t zei_range_clears[MAX_RANGES];

	struct zei_ranges {
		uint32_t zr_start;
		uint32_t zr_end;
	} zei_ranges[MAX_RANGES];

	size_t zei_range_count;
	uint32_t zei_mingap;
	uint32_t zei_allowed_mingap;

} zfs_ecksum_info_t;

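/*
 * Sizing note (derived from the constants above): a corruption covering at
 * most ZFM_MAX_INLINE 64-bit words -- 128 bytes -- is recorded verbatim in
 * zei_bits_set/zei_bits_cleared; anything larger falls back to the per-bit
 * histograms, which count how often each of the 64 bit positions in a word
 * was flipped on or off.
 */
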
static void
update_histogram(uint64_t value_arg, uint16_t *hist, uint32_t *count)
{
	int i;
	int bits = 0;
	uint64_t value = BE_64(value_arg);

	/* We store the bits in big-endian (largest-first) order */
	for (i = 0; i < 64; i++) {
		if (value & (1ull << i)) {
			hist[63 - i]++;
			++bits;
		}
	}

	/* update the count of bits changed */
	*count += bits;
}

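/*
 * For illustration of update_histogram() above (assuming a big-endian host,
 * where BE_64() is the identity): passing value_arg 0x8000000000000001ULL
 * increments hist[0] and hist[63] and adds 2 to *count, since exactly the
 * highest and lowest bits are set.
 */
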
/*
 * We've now filled up the range array, and need to increase "mingap" and
 * shrink the range list accordingly.  zei_mingap is always the smallest
 * distance between array entries, so we set the new_allowed_gap to be
 * one greater than that.  We then go through the list, joining together
 * any ranges which are closer than the new_allowed_gap.
 *
 * By construction, there will be at least one pair to join.  We also update
 * zei_mingap to the new smallest gap, to prepare for our next invocation.
 */
static void
zei_shrink_ranges(zfs_ecksum_info_t *eip)
{
	uint32_t mingap = UINT32_MAX;
	uint32_t new_allowed_gap = eip->zei_mingap + 1;

	size_t idx, output = 0;
	size_t max = eip->zei_range_count;

	struct zei_ranges *r = eip->zei_ranges;

	ASSERT3U(eip->zei_range_count, >, 0);
	ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);

	idx = 0;
	while (idx < max - 1) {
		uint32_t start = r[idx].zr_start;
		uint32_t end = r[idx].zr_end;

		while (idx < max - 1) {
			uint32_t nstart, nend, gap;

			idx++;
			nstart = r[idx].zr_start;
			nend = r[idx].zr_end;

			gap = nstart - end;
			if (gap < new_allowed_gap) {
				end = nend;
				continue;
			}
			if (gap < mingap)
				mingap = gap;
			break;
		}
		r[output].zr_start = start;
		r[output].zr_end = end;
		output++;
	}

	ASSERT3U(output, <, eip->zei_range_count);
	eip->zei_range_count = output;
	eip->zei_mingap = mingap;
	eip->zei_allowed_mingap = new_allowed_gap;
}

static void
zei_add_range(zfs_ecksum_info_t *eip, int start, int end)
{
	struct zei_ranges *r = eip->zei_ranges;
	size_t count = eip->zei_range_count;

	if (count >= MAX_RANGES) {
		zei_shrink_ranges(eip);
		count = eip->zei_range_count;
	}
	if (count == 0) {
		eip->zei_mingap = UINT32_MAX;
		eip->zei_allowed_mingap = 1;
	} else {
		int gap = start - r[count - 1].zr_end;

		if (gap < eip->zei_allowed_mingap) {
			r[count - 1].zr_end = end;
			return;
		}
		if (gap < eip->zei_mingap)
			eip->zei_mingap = gap;
	}

	r[count].zr_start = start;
	r[count].zr_end = end;
	eip->zei_range_count++;
}

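/*
 * For instance (illustrative values): if the last stored range is [0,2) and
 * zei_allowed_mingap is 2, then zei_add_range(eip, 3, 5) sees a gap of
 * 3 - 2 = 1 and simply extends that range to [0,5) instead of appending a
 * new entry.
 */
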
static size_t
zei_range_total_size(zfs_ecksum_info_t *eip)
{
	struct zei_ranges *r = eip->zei_ranges;
	size_t count = eip->zei_range_count;
	size_t result = 0;
	size_t idx;

	for (idx = 0; idx < count; idx++)
		result += (r[idx].zr_end - r[idx].zr_start);

	return (result);
}

static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
    const uint8_t *goodbuf, const uint8_t *badbuf, size_t size,
    boolean_t drop_if_identical)
{
	const uint64_t *good = (const uint64_t *)goodbuf;
	const uint64_t *bad = (const uint64_t *)badbuf;

	uint64_t allset = 0;
	uint64_t allcleared = 0;

	size_t nui64s = size / sizeof (uint64_t);

	size_t inline_size;
	int no_inline = 0;
	size_t idx;
	size_t range;

	size_t offset = 0;
	ssize_t start = -1;

	zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_PUSHPAGE);

	/* don't do any annotation for injected checksum errors */
	if (info != NULL && info->zbc_injected)
		return (eip);

	if (info != NULL && info->zbc_has_cksum) {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
		    DATA_TYPE_UINT64_ARRAY,
		    sizeof (info->zbc_expected) / sizeof (uint64_t),
		    (uint64_t *)&info->zbc_expected,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
		    DATA_TYPE_UINT64_ARRAY,
		    sizeof (info->zbc_actual) / sizeof (uint64_t),
		    (uint64_t *)&info->zbc_actual,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
		    DATA_TYPE_STRING,
		    info->zbc_checksum_name,
		    NULL);

		if (info->zbc_byteswapped) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
			    DATA_TYPE_BOOLEAN, 1,
			    NULL);
		}
	}

	if (badbuf == NULL || goodbuf == NULL)
		return (eip);

	ASSERT3U(nui64s, <=, UINT16_MAX);
	ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(size, <=, UINT32_MAX);

	/* build up the range list by comparing the two buffers. */
	for (idx = 0; idx < nui64s; idx++) {
		if (good[idx] == bad[idx]) {
			if (start == -1)
				continue;

			zei_add_range(eip, start, idx);
			start = -1;
		} else {
			if (start != -1)
				continue;

			start = idx;
		}
	}
	if (start != -1)
		zei_add_range(eip, start, idx);

	/* See if it will fit in our inline buffers */
	inline_size = zei_range_total_size(eip);
	if (inline_size > ZFM_MAX_INLINE)
		no_inline = 1;

	/*
	 * If there is no change and we want to drop if the buffers are
	 * identical, do so.
	 */
	if (inline_size == 0 && drop_if_identical) {
		kmem_free(eip, sizeof (*eip));
		return (NULL);
	}

	/*
	 * Now walk through the ranges, filling in the details of the
	 * differences.  Also convert our uint64_t-array offsets to byte
	 * offsets.
	 */
	for (range = 0; range < eip->zei_range_count; range++) {
		size_t start = eip->zei_ranges[range].zr_start;
		size_t end = eip->zei_ranges[range].zr_end;

		for (idx = start; idx < end; idx++) {
			uint64_t set, cleared;

			// bits set in bad, but not in good
			set = ((~good[idx]) & bad[idx]);
			// bits set in good, but not in bad
			cleared = (good[idx] & (~bad[idx]));

			allset |= set;
			allcleared |= cleared;

			if (!no_inline) {
				ASSERT3U(offset, <, inline_size);
				eip->zei_bits_set[offset] = set;
				eip->zei_bits_cleared[offset] = cleared;
				offset++;
			}

			update_histogram(set, eip->zei_histogram_set,
			    &eip->zei_range_sets[range]);
			update_histogram(cleared, eip->zei_histogram_cleared,
			    &eip->zei_range_clears[range]);
		}

		/* convert to byte offsets */
		eip->zei_ranges[range].zr_start *= sizeof (uint64_t);
		eip->zei_ranges[range].zr_end *= sizeof (uint64_t);
	}
	eip->zei_allowed_mingap *= sizeof (uint64_t);
	inline_size *= sizeof (uint64_t);

	/* fill in ereport */
	fm_payload_set(ereport,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
	    DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
	    (uint32_t *)eip->zei_ranges,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
	    DATA_TYPE_UINT32, eip->zei_allowed_mingap,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
	    DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
	    DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
	    NULL);

	if (!no_inline) {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
		    DATA_TYPE_UINT8_ARRAY,
		    inline_size, (uint8_t *)eip->zei_bits_set,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
		    DATA_TYPE_UINT8_ARRAY,
		    inline_size, (uint8_t *)eip->zei_bits_cleared,
		    NULL);
	} else {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
		    DATA_TYPE_UINT16_ARRAY,
		    NBBY * sizeof (uint64_t), eip->zei_histogram_set,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
		    DATA_TYPE_UINT16_ARRAY,
		    NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
		    NULL);
	}
	return (eip);
}
#endif

void
zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
#ifdef _KERNEL
	nvlist_t *ereport = NULL;
	nvlist_t *detector = NULL;

	zfs_ereport_start(&ereport, &detector,
	    subclass, spa, vd, zio, stateoroffset, size);

	if (ereport == NULL)
		return;

	/* Cleanup is handled by the callback function */
	zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
#endif
}

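/*
 * A typical call site looks something like the following (illustrative;
 * the actual callers live in the ZIO pipeline and vdev code):
 *
 *	zfs_ereport_post(FM_EREPORT_ZFS_IO, zio->io_spa, vd, zio, 0, 0);
 */
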
void
zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
    struct zio *zio, uint64_t offset, uint64_t length, void *arg,
    zio_bad_cksum_t *info)
{
#ifdef _KERNEL
	zio_cksum_report_t *report = kmem_zalloc(sizeof (*report), KM_PUSHPAGE);

	if (zio->io_vsd != NULL)
		zio->io_vsd_ops->vsd_cksum_report(zio, report, arg);
	else
		zio_vsd_default_cksum_report(zio, report, arg);

	/* copy the checksum failure information if it was provided */
	if (info != NULL) {
		report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_PUSHPAGE);
		bcopy(info, report->zcr_ckinfo, sizeof (*info));
	}

	report->zcr_align = 1ULL << vd->vdev_top->vdev_ashift;
	report->zcr_length = length;

	zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector,
	    FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);

	if (report->zcr_ereport == NULL) {
		zfs_ereport_free_checksum(report);
		return;
	}

	mutex_enter(&spa->spa_errlist_lock);
	report->zcr_next = zio->io_logical->io_cksum_report;
	zio->io_logical->io_cksum_report = report;
	mutex_exit(&spa->spa_errlist_lock);
#endif
}

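/*
 * Note: each report queued on io_cksum_report above is expected to be
 * completed by zfs_ereport_finish_checksum() below once the good and bad
 * buffers are known, or released via zfs_ereport_free_checksum() if they
 * never become available.
 */
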
void
zfs_ereport_finish_checksum(zio_cksum_report_t *report,
    const void *good_data, const void *bad_data, boolean_t drop_if_identical)
{
#ifdef _KERNEL
	zfs_ecksum_info_t *info;

	info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
	    good_data, bad_data, report->zcr_length, drop_if_identical);

	if (info != NULL)
		zfs_zevent_post(report->zcr_ereport,
		    report->zcr_detector, zfs_zevent_post_cb);
	else
		zfs_zevent_post_cb(report->zcr_ereport, report->zcr_detector);

	report->zcr_ereport = report->zcr_detector = NULL;

	if (info != NULL)
		kmem_free(info, sizeof (*info));
#endif
}

void
zfs_ereport_free_checksum(zio_cksum_report_t *rpt)
{
#ifdef _KERNEL
	if (rpt->zcr_ereport != NULL) {
		fm_nvlist_destroy(rpt->zcr_ereport,
		    FM_NVA_FREE);
		fm_nvlist_destroy(rpt->zcr_detector,
		    FM_NVA_FREE);
	}
#endif
	rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo);

	if (rpt->zcr_ckinfo != NULL)
		kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo));

	kmem_free(rpt, sizeof (*rpt));
}

void
zfs_ereport_send_interim_checksum(zio_cksum_report_t *report)
{
#ifdef _KERNEL
	zfs_zevent_post(report->zcr_ereport, report->zcr_detector,
	    zfs_zevent_post_cb_noop);
#endif
}

void
zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
    struct zio *zio, uint64_t offset, uint64_t length,
    const void *good_data, const void *bad_data, zio_bad_cksum_t *zbc)
{
#ifdef _KERNEL
	nvlist_t *ereport = NULL;
	nvlist_t *detector = NULL;
	zfs_ecksum_info_t *info;

	zfs_ereport_start(&ereport, &detector,
	    FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);

	if (ereport == NULL)
		return;

	info = annotate_ecksum(ereport, zbc, good_data, bad_data, length,
	    B_FALSE);

	if (info != NULL) {
		zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
		kmem_free(info, sizeof (*info));
	}
#endif
}

static void
zfs_post_common(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	nvlist_t *resource;
	char class[64];

	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
		return;

	if ((resource = fm_nvlist_create(NULL)) == NULL)
		return;

	(void) snprintf(class, sizeof (class), "%s.%s.%s", FM_RSRC_RESOURCE,
	    ZFS_ERROR_CLASS, name);
	VERIFY0(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION));
	VERIFY0(nvlist_add_string(resource, FM_CLASS, class));
	VERIFY0(nvlist_add_uint64(resource,
	    FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)));
	VERIFY0(nvlist_add_int32(resource,
	    FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, spa_load_state(spa)));

	if (vd) {
		VERIFY0(nvlist_add_uint64(resource,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid));
		VERIFY0(nvlist_add_uint64(resource,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, vd->vdev_state));
	}

	zfs_zevent_post(resource, NULL, zfs_zevent_post_cb);
#endif
}

/*
 * The 'resource.fs.zfs.removed' event is an internal signal that the given vdev
 * has been removed from the system.  This will cause the DE to ignore any
 * recent I/O errors, inferring that they are due to the asynchronous device
 * removal.
 */
void
zfs_post_remove(spa_t *spa, vdev_t *vd)
{
	zfs_post_common(spa, vd, FM_EREPORT_RESOURCE_REMOVED);
}

/*
 * The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool
 * has the 'autoreplace' property set, and therefore any broken vdevs will be
 * handled by higher level logic, and no vdev fault should be generated.
 */
void
zfs_post_autoreplace(spa_t *spa, vdev_t *vd)
{
	zfs_post_common(spa, vd, FM_EREPORT_RESOURCE_AUTOREPLACE);
}

/*
 * The 'resource.fs.zfs.statechange' event is an internal signal that the
 * given vdev has transitioned its state to DEGRADED or HEALTHY.  This will
 * cause the retire agent to repair any outstanding fault management cases
 * open because the device was not found (fault.fs.zfs.device).
 */
void
zfs_post_state_change(spa_t *spa, vdev_t *vd)
{
	zfs_post_common(spa, vd, FM_EREPORT_RESOURCE_STATECHANGE);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_ereport_post);
EXPORT_SYMBOL(zfs_ereport_post_checksum);
EXPORT_SYMBOL(zfs_post_remove);
EXPORT_SYMBOL(zfs_post_autoreplace);
EXPORT_SYMBOL(zfs_post_state_change);
#endif