/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012,2021 by Delphix. All rights reserved.
 */
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/sysevent.h>
/*
 * This general routine is responsible for generating all the different ZFS
 * ereports.  The payload is dependent on the class, and which arguments are
 * supplied to the function:
 *
 *	EREPORT			POOL	VDEV	IO
 *	checksum		X	X	X
 *	device			X	X
 *	io			X	X	X
 *
 * If we are in a loading state, all errors are chained together by the same
 * SPA-wide ENA (Error Numeric Association).
 *
 * For isolated I/O requests, we get the ENA from the zio_t.  The propagation
 * gets very complicated due to RAID-Z, gang blocks, and vdev caching.  We want
 * to chain together all ereports associated with a logical piece of data.  For
 * read I/Os, there are basically three 'types' of I/O, which form a roughly
 * layered diagram:
 *
 *	+---------------+
 *	| Aggregate I/O |	No associated logical data or device
 *	+---------------+
 *              |
 *              V
 *	+---------------+	Reads associated with a piece of logical data.
 *	|   Read I/O	|	This includes reads on behalf of RAID-Z,
 *	+---------------+	mirrors, gang blocks, retries, etc.
 *              |
 *              V
 *	+---------------+	Reads associated with a particular device, but
 *	| Physical I/O	|	no logical data.  Issued as part of vdev caching
 *	+---------------+	and I/O aggregation.
 *
 * Note that 'physical I/O' here is not the same terminology as used in the
 * rest of ZIO.  Typically, 'physical I/O' simply means that there is no
 * attached block pointer.  But I/O with no associated block pointer can still
 * be related to a logical piece of data (i.e. RAID-Z requests).
 *
 * Purely physical I/Os always have unique ENAs.  They are not related to a
 * particular piece of logical data, and therefore cannot be chained together.
 * We still generate an ereport, but the DE doesn't correlate it with any
 * logical piece of data.  When such an I/O fails, the delegated I/O requests
 * will issue a retry, which will trigger the 'real' ereport with the correct
 * ENA.
 *
 * We keep track of the ENA for a ZIO chain through the 'io_logical' member.
 * When a new logical I/O is issued, we set this to point to itself.  Child
 * I/Os then inherit this pointer, so that once it has been set all subsequent
 * failures will use the same ENA.  For vdev cache fill and queue aggregation
 * I/O, this pointer is set to NULL, and no ereport will be generated (since
 * it doesn't actually correspond to any particular device or piece of data,
 * and the caller will always retry without caching or queueing anyway).
 *
 * For checksum errors, we want to include more information about the actual
 * error which occurs.  Accordingly, we build an ereport when the error is
 * noticed, but instead of sending it in immediately, we hang it off of the
 * io_cksum_report field of the logical IO.  When the logical IO completes
 * (successfully or not), zfs_ereport_finish_checksum() is called with the
 * good and bad versions of the buffer (if available), and we annotate the
 * ereport with information about the differences.
 */
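
/*
 * Illustrative sketch of the ENA chaining described above (hypothetical
 * fragment, not part of this file): a logical zio points io_logical at
 * itself and child zios inherit the pointer, so every ereport in the
 * chain shares one ENA.
 */
#if 0
	if (zio->io_logical == NULL)
		zio->io_logical = zio;			/* new logical I/O */
	child->io_logical = zio->io_logical;		/* child joins chain */
#endif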
#ifdef _KERNEL

/*
 * Duplicate ereport Detection
 *
 * Some ereports are retained momentarily for detecting duplicates.  These
 * are kept in a recent_events_node_t in both a time-ordered list and an AVL
 * tree of recent unique ereports.
 *
 * The lifespan of these recent ereports is bounded (15 mins) and a cleaner
 * task is used to purge stale entries.
 */
static list_t recent_events_list;
static avl_tree_t recent_events_tree;
static kmutex_t recent_events_lock;
static taskqid_t recent_events_cleaner_tqid;
/*
 * Maximum number of unique ereports to retain for duplicate checking.
 * Each node is about 128 bytes so 2,000 would consume 1/4 MiB.
 *
 * This setting can be changed dynamically and setting it to zero
 * disables duplicate detection.
 */
static unsigned int zfs_zevent_retain_max = 2000;

/*
 * The lifespan for a recent ereport entry.  The default of 15 minutes is
 * intended to outlive the zfs diagnosis engine's threshold of 10 errors
 * over a period of 10 minutes.
 */
static unsigned int zfs_zevent_retain_expire_secs = 900;
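
/*
 * Note: both tunables above are exposed through the ZFS_MODULE_PARAM()
 * declarations at the bottom of this file; on Linux they can typically be
 * adjusted at runtime via /sys/module/zfs/parameters/.
 */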
typedef enum zfs_subclass {
        ZSC_IO,
        ZSC_DATA,
        ZSC_CHECKSUM
} zfs_subclass_t;

typedef struct {
        /* common criteria */
        uint64_t        re_pool_guid;
        uint64_t        re_vdev_guid;
        int             re_io_error;
        uint64_t        re_io_size;
        uint64_t        re_io_offset;
        zfs_subclass_t  re_subclass;
        zio_priority_t  re_io_priority;

        /* logical zio criteria (optional) */
        zbookmark_phys_t re_io_bookmark;

        /* internal state */
        avl_node_t      re_tree_link;
        list_node_t     re_list_link;
        uint64_t        re_timestamp;
} recent_events_node_t;
static int
recent_events_compare(const void *a, const void *b)
{
        const recent_events_node_t *node1 = a;
        const recent_events_node_t *node2 = b;
        int cmp;

        /*
         * The comparison order here is somewhat arbitrary.
         * What's important is that if every criteria matches, then it
         * is a duplicate (i.e. compare returns 0)
         */
        if ((cmp = TREE_CMP(node1->re_subclass, node2->re_subclass)) != 0)
                return (cmp);
        if ((cmp = TREE_CMP(node1->re_pool_guid, node2->re_pool_guid)) != 0)
                return (cmp);
        if ((cmp = TREE_CMP(node1->re_vdev_guid, node2->re_vdev_guid)) != 0)
                return (cmp);
        if ((cmp = TREE_CMP(node1->re_io_error, node2->re_io_error)) != 0)
                return (cmp);
        if ((cmp = TREE_CMP(node1->re_io_priority, node2->re_io_priority)) != 0)
                return (cmp);
        if ((cmp = TREE_CMP(node1->re_io_size, node2->re_io_size)) != 0)
                return (cmp);
        if ((cmp = TREE_CMP(node1->re_io_offset, node2->re_io_offset)) != 0)
                return (cmp);

        const zbookmark_phys_t *zb1 = &node1->re_io_bookmark;
        const zbookmark_phys_t *zb2 = &node2->re_io_bookmark;

        if ((cmp = TREE_CMP(zb1->zb_objset, zb2->zb_objset)) != 0)
                return (cmp);
        if ((cmp = TREE_CMP(zb1->zb_object, zb2->zb_object)) != 0)
                return (cmp);
        if ((cmp = TREE_CMP(zb1->zb_level, zb2->zb_level)) != 0)
                return (cmp);
        if ((cmp = TREE_CMP(zb1->zb_blkid, zb2->zb_blkid)) != 0)
                return (cmp);

        return (0);
}
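
/*
 * Sketch of the comparator contract above (hypothetical values, not part
 * of this file): TREE_CMP() yields -1, 0, or +1, so two nodes compare
 * equal only when every criterion matches.
 */
#if 0
	recent_events_node_t a = { .re_pool_guid = 1, .re_vdev_guid = 2 };
	recent_events_node_t b = a;
	VERIFY0(recent_events_compare(&a, &b));
#endif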
static void zfs_ereport_schedule_cleaner(void);

/*
 * background task to clean stale recent event nodes.
 */
static void
zfs_ereport_cleaner(void *arg)
{
        (void) arg;
        recent_events_node_t *entry;
        uint64_t now = gethrtime();

        /*
         * purge expired entries
         */
        mutex_enter(&recent_events_lock);
        while ((entry = list_tail(&recent_events_list)) != NULL) {
                uint64_t age = NSEC2SEC(now - entry->re_timestamp);
                if (age <= zfs_zevent_retain_expire_secs)
                        break;

                /* remove expired node */
                avl_remove(&recent_events_tree, entry);
                list_remove(&recent_events_list, entry);
                kmem_free(entry, sizeof (*entry));
        }

        /* Restart the cleaner if more entries remain */
        recent_events_cleaner_tqid = 0;
        if (!list_is_empty(&recent_events_list))
                zfs_ereport_schedule_cleaner();

        mutex_exit(&recent_events_lock);
}
static void
zfs_ereport_schedule_cleaner(void)
{
        ASSERT(MUTEX_HELD(&recent_events_lock));

        uint64_t timeout = SEC2NSEC(zfs_zevent_retain_expire_secs + 1);

        recent_events_cleaner_tqid = taskq_dispatch_delay(
            system_delay_taskq, zfs_ereport_cleaner, NULL, TQ_SLEEP,
            ddi_get_lbolt() + NSEC_TO_TICK(timeout));
}
/*
 * Clear entries for a given vdev or all vdevs in a pool when vdev == NULL
 */
void
zfs_ereport_clear(spa_t *spa, vdev_t *vd)
{
        uint64_t vdev_guid, pool_guid;

        ASSERT(vd != NULL || spa != NULL);
        if (vd == NULL) {
                vdev_guid = 0;
                pool_guid = spa_guid(spa);
        } else {
                vdev_guid = vd->vdev_guid;
                pool_guid = 0;
        }

        mutex_enter(&recent_events_lock);

        recent_events_node_t *next = list_head(&recent_events_list);
        while (next != NULL) {
                recent_events_node_t *entry = next;

                next = list_next(&recent_events_list, next);

                if (entry->re_vdev_guid == vdev_guid ||
                    entry->re_pool_guid == pool_guid) {
                        avl_remove(&recent_events_tree, entry);
                        list_remove(&recent_events_list, entry);
                        kmem_free(entry, sizeof (*entry));
                }
        }

        mutex_exit(&recent_events_lock);
}
/*
 * Check if an ereport would be a duplicate of one recently posted.
 *
 * An ereport is considered a duplicate if the set of criteria in
 * recent_events_node_t all match.
 *
 * Only FM_EREPORT_ZFS_IO, FM_EREPORT_ZFS_DATA, and FM_EREPORT_ZFS_CHECKSUM
 * are candidates for duplicate checking.
 */
static boolean_t
zfs_ereport_is_duplicate(const char *subclass, spa_t *spa, vdev_t *vd,
    const zbookmark_phys_t *zb, zio_t *zio, uint64_t offset, uint64_t size)
{
        recent_events_node_t search = {0}, *entry;

        if (vd == NULL || zio == NULL)
                return (B_FALSE);

        if (zfs_zevent_retain_max == 0)
                return (B_FALSE);

        if (strcmp(subclass, FM_EREPORT_ZFS_IO) == 0)
                search.re_subclass = ZSC_IO;
        else if (strcmp(subclass, FM_EREPORT_ZFS_DATA) == 0)
                search.re_subclass = ZSC_DATA;
        else if (strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0)
                search.re_subclass = ZSC_CHECKSUM;
        else
                return (B_FALSE);

        search.re_pool_guid = spa_guid(spa);
        search.re_vdev_guid = vd->vdev_guid;
        search.re_io_error = zio->io_error;
        search.re_io_priority = zio->io_priority;
        /* if size is supplied use it over what's in zio */
        if (size) {
                search.re_io_size = size;
                search.re_io_offset = offset;
        } else {
                search.re_io_size = zio->io_size;
                search.re_io_offset = zio->io_offset;
        }

        /* grab optional logical zio criteria */
        if (zb != NULL) {
                search.re_io_bookmark.zb_objset = zb->zb_objset;
                search.re_io_bookmark.zb_object = zb->zb_object;
                search.re_io_bookmark.zb_level = zb->zb_level;
                search.re_io_bookmark.zb_blkid = zb->zb_blkid;
        }

        uint64_t now = gethrtime();

        mutex_enter(&recent_events_lock);

        /* check if we have seen this one recently */
        entry = avl_find(&recent_events_tree, &search, NULL);
        if (entry != NULL) {
                uint64_t age = NSEC2SEC(now - entry->re_timestamp);

                /*
                 * There is still an active cleaner (since we're here).
                 * Reset the last seen time for this duplicate entry
                 * so that its lifespan gets extended.
                 */
                list_remove(&recent_events_list, entry);
                list_insert_head(&recent_events_list, entry);
                entry->re_timestamp = now;

                zfs_zevent_track_duplicate();
                mutex_exit(&recent_events_lock);

                return (age <= zfs_zevent_retain_expire_secs);
        }

        if (avl_numnodes(&recent_events_tree) >= zfs_zevent_retain_max) {
                /* recycle oldest node */
                entry = list_tail(&recent_events_list);
                ASSERT(entry != NULL);
                list_remove(&recent_events_list, entry);
                avl_remove(&recent_events_tree, entry);
        } else {
                entry = kmem_alloc(sizeof (recent_events_node_t), KM_SLEEP);
        }

        /* record this as a recent ereport */
        *entry = search;
        avl_add(&recent_events_tree, entry);
        list_insert_head(&recent_events_list, entry);
        entry->re_timestamp = now;

        /* Start a cleaner if not already scheduled */
        if (recent_events_cleaner_tqid == 0)
                zfs_ereport_schedule_cleaner();

        mutex_exit(&recent_events_lock);
        return (B_FALSE);
}
static void
zfs_zevent_post_cb(nvlist_t *nvl, nvlist_t *detector)
{
        if (nvl)
                fm_nvlist_destroy(nvl, FM_NVA_FREE);

        if (detector)
                fm_nvlist_destroy(detector, FM_NVA_FREE);
}
/*
 * We want to rate limit ZIO delay, deadman, and checksum events so as to not
 * flood zevent consumers when a disk is acting up.
 *
 * Returns 1 if we're ratelimiting, 0 if not.
 */
static int
zfs_is_ratelimiting_event(const char *subclass, vdev_t *vd)
{
        int rc = 0;
        /*
         * zfs_ratelimit() returns 1 if we're *not* ratelimiting and 0 if we
         * are.  Invert it to get our return value.
         */
        if (strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) {
                rc = !zfs_ratelimit(&vd->vdev_delay_rl);
        } else if (strcmp(subclass, FM_EREPORT_ZFS_DEADMAN) == 0) {
                rc = !zfs_ratelimit(&vd->vdev_deadman_rl);
        } else if (strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0) {
                rc = !zfs_ratelimit(&vd->vdev_checksum_rl);
        }

        if (rc) {
                /* We're rate limiting */
                fm_erpt_dropped_increment();
        }

        return (rc);
}
/*
 * Return B_TRUE if the event actually posted, B_FALSE if not.
 */
static boolean_t
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
    const char *subclass, spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
    zio_t *zio, uint64_t stateoroffset, uint64_t size)
{
        nvlist_t *ereport, *detector;

        uint64_t ena;
        char class[64];

        if ((ereport = fm_nvlist_create(NULL)) == NULL)
                return (B_FALSE);

        if ((detector = fm_nvlist_create(NULL)) == NULL) {
                fm_nvlist_destroy(ereport, FM_NVA_FREE);
                return (B_FALSE);
        }

        /*
         * Serialize ereport generation
         */
        mutex_enter(&spa->spa_errlist_lock);

        /*
         * Determine the ENA to use for this event.  If we are in a loading
         * state, use a SPA-wide ENA.  Otherwise, if we are in an I/O state,
         * use a root zio-wide ENA.  Otherwise, simply use a unique ENA.
         */
        if (spa_load_state(spa) != SPA_LOAD_NONE) {
                if (spa->spa_ena == 0)
                        spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
                ena = spa->spa_ena;
        } else if (zio != NULL && zio->io_logical != NULL) {
                if (zio->io_logical->io_ena == 0)
                        zio->io_logical->io_ena =
                            fm_ena_generate(0, FM_ENA_FMT1);
                ena = zio->io_logical->io_ena;
        } else {
                ena = fm_ena_generate(0, FM_ENA_FMT1);
        }

        /*
         * Construct the full class, detector, and other standard FMA fields.
         */
        (void) snprintf(class, sizeof (class), "%s.%s",
            ZFS_ERROR_CLASS, subclass);

        fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa),
            vd != NULL ? vd->vdev_guid : 0);

        fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL);

        /*
         * Construct the per-ereport payload, depending on which parameters are
         * passed in.
         */

        /*
         * Generic payload members common to all ereports.
         */
        fm_payload_set(ereport,
            FM_EREPORT_PAYLOAD_ZFS_POOL, DATA_TYPE_STRING, spa_name(spa),
            FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, DATA_TYPE_UINT64, spa_guid(spa),
            FM_EREPORT_PAYLOAD_ZFS_POOL_STATE, DATA_TYPE_UINT64,
            (uint64_t)spa_state(spa),
            FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32,
            (int32_t)spa_load_state(spa), NULL);

        fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
            DATA_TYPE_STRING,
            spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
            FM_EREPORT_FAILMODE_WAIT :
            spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
            FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC,
            NULL);

        if (vd != NULL) {
                vdev_t *pvd = vd->vdev_parent;
                vdev_queue_t *vq = &vd->vdev_queue;
                vdev_stat_t *vs = &vd->vdev_stat;
                vdev_t *spare_vd;
                uint64_t *spare_guids;
                char **spare_paths;
                int i, spare_count;

                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
                    DATA_TYPE_UINT64, vd->vdev_guid,
                    FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
                    DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL);
                if (vd->vdev_path != NULL)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
                            DATA_TYPE_STRING, vd->vdev_path, NULL);
                if (vd->vdev_devid != NULL)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID,
                            DATA_TYPE_STRING, vd->vdev_devid, NULL);
                if (vd->vdev_fru != NULL)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU,
                            DATA_TYPE_STRING, vd->vdev_fru, NULL);
                if (vd->vdev_enc_sysfs_path != NULL)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
                            DATA_TYPE_STRING, vd->vdev_enc_sysfs_path, NULL);
                if (vd->vdev_ashift)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_ASHIFT,
                            DATA_TYPE_UINT64, vd->vdev_ashift, NULL);

                if (vq != NULL) {
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_COMP_TS,
                            DATA_TYPE_UINT64, vq->vq_io_complete_ts, NULL);
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_DELTA_TS,
                            DATA_TYPE_UINT64, vq->vq_io_delta_ts, NULL);
                }

                if (vs != NULL) {
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_READ_ERRORS,
                            DATA_TYPE_UINT64, vs->vs_read_errors,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_WRITE_ERRORS,
                            DATA_TYPE_UINT64, vs->vs_write_errors,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_ERRORS,
                            DATA_TYPE_UINT64, vs->vs_checksum_errors,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_DELAYS,
                            DATA_TYPE_UINT64, vs->vs_slow_ios,
                            NULL);
                }

                if (pvd != NULL) {
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID,
                            DATA_TYPE_UINT64, pvd->vdev_guid,
                            FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
                            DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type,
                            NULL);
                        if (pvd->vdev_path != NULL)
                                fm_payload_set(ereport,
                                    FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
                                    DATA_TYPE_STRING, pvd->vdev_path, NULL);
                        if (pvd->vdev_devid != NULL)
                                fm_payload_set(ereport,
                                    FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
                                    DATA_TYPE_STRING, pvd->vdev_devid, NULL);
                }

                spare_count = spa->spa_spares.sav_count;
                spare_paths = kmem_zalloc(sizeof (char *) * spare_count,
                    KM_SLEEP);
                spare_guids = kmem_zalloc(sizeof (uint64_t) * spare_count,
                    KM_SLEEP);

                for (i = 0; i < spare_count; i++) {
                        spare_vd = spa->spa_spares.sav_vdevs[i];
                        if (spare_vd) {
                                spare_paths[i] = spare_vd->vdev_path;
                                spare_guids[i] = spare_vd->vdev_guid;
                        }
                }

                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_PATHS,
                    DATA_TYPE_STRING_ARRAY, spare_count, spare_paths,
                    FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_GUIDS,
                    DATA_TYPE_UINT64_ARRAY, spare_count, spare_guids, NULL);

                kmem_free(spare_guids, sizeof (uint64_t) * spare_count);
                kmem_free(spare_paths, sizeof (char *) * spare_count);
        }

        if (zio != NULL) {
                /*
                 * Payload common to all I/Os.
                 */
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
                    DATA_TYPE_INT32, zio->io_error, NULL);
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS,
                    DATA_TYPE_INT32, zio->io_flags, NULL);
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE,
                    DATA_TYPE_UINT32, zio->io_stage, NULL);
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE,
                    DATA_TYPE_UINT32, zio->io_pipeline, NULL);
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY,
                    DATA_TYPE_UINT64, zio->io_delay, NULL);
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_TIMESTAMP,
                    DATA_TYPE_UINT64, zio->io_timestamp, NULL);
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELTA,
                    DATA_TYPE_UINT64, zio->io_delta, NULL);
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY,
                    DATA_TYPE_UINT32, zio->io_priority, NULL);

                /*
                 * If the 'size' parameter is non-zero, it indicates this is a
                 * RAID-Z or other I/O where the physical offset and length are
                 * provided for us, instead of within the zio_t.
                 */
                if (vd != NULL) {
                        if (size)
                                fm_payload_set(ereport,
                                    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
                                    DATA_TYPE_UINT64, stateoroffset,
                                    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
                                    DATA_TYPE_UINT64, size, NULL);
                        else
                                fm_payload_set(ereport,
                                    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
                                    DATA_TYPE_UINT64, zio->io_offset,
                                    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
                                    DATA_TYPE_UINT64, zio->io_size, NULL);
                }
        } else if (vd != NULL) {
                /*
                 * If we have a vdev but no zio, this is a device fault, and
                 * the 'stateoroffset' parameter indicates the previous state
                 * of the vdev.
                 */
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
                    DATA_TYPE_UINT64, stateoroffset, NULL);
        }

        /*
         * Payload for I/Os with corresponding logical information.
         */
        if (zb != NULL && (zio == NULL || zio->io_logical != NULL)) {
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET,
                    DATA_TYPE_UINT64, zb->zb_objset,
                    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
                    DATA_TYPE_UINT64, zb->zb_object,
                    FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
                    DATA_TYPE_INT64, zb->zb_level,
                    FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
                    DATA_TYPE_UINT64, zb->zb_blkid, NULL);
        }

        mutex_exit(&spa->spa_errlist_lock);

        *ereport_out = ereport;
        *detector_out = detector;
        return (B_TRUE);
}
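
/*
 * Note: consumers see the fully qualified ereport class; e.g. for the
 * "checksum" subclass the class built above is "fs.zfs.checksum", which
 * fm_ereport_set() posts as "ereport.fs.zfs.checksum".
 */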
/* if it's <= 128 bytes, save the corruption directly */
#define	ZFM_MAX_INLINE		(128 / sizeof (uint64_t))

#define	MAX_RANGES		16

typedef struct zfs_ecksum_info {
        /* histograms of set and cleared bits by bit number in a 64-bit word */
        uint32_t zei_histogram_set[sizeof (uint64_t) * NBBY];
        uint32_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];

        /* inline arrays of bits set and cleared. */
        uint64_t zei_bits_set[ZFM_MAX_INLINE];
        uint64_t zei_bits_cleared[ZFM_MAX_INLINE];

        /*
         * for each range, the number of bits set and cleared.  The Hamming
         * distance between the good and bad buffers is the sum of them all.
         */
        uint32_t zei_range_sets[MAX_RANGES];
        uint32_t zei_range_clears[MAX_RANGES];

        struct zei_ranges {
                uint32_t zr_start;
                uint32_t zr_end;
        } zei_ranges[MAX_RANGES];

        size_t  zei_range_count;
        uint32_t zei_mingap;
        uint32_t zei_allowed_mingap;

} zfs_ecksum_info_t;
static void
update_histogram(uint64_t value_arg, uint32_t *hist, uint32_t *count)
{
        size_t i;
        size_t bits = 0;
        uint64_t value = BE_64(value_arg);

        /* We store the bits in big-endian (largest-first) order */
        for (i = 0; i < 64; i++) {
                if (value & (1ull << i)) {
                        hist[63 - i]++;
                        ++bits;
                }
        }

        /* update the count of bits changed */
        *count += bits;
}
/*
 * We've now filled up the range array, and need to increase "mingap" and
 * shrink the range list accordingly.  zei_mingap is always the smallest
 * distance between array entries, so we set the new_allowed_gap to be
 * one greater than that.  We then go through the list, joining together
 * any ranges which are closer than the new_allowed_gap.
 *
 * By construction, there will be at least one.  We also update zei_mingap
 * to the new smallest gap, to prepare for our next invocation.
 */
static void
zei_shrink_ranges(zfs_ecksum_info_t *eip)
{
        uint32_t mingap = UINT32_MAX;
        uint32_t new_allowed_gap = eip->zei_mingap + 1;

        size_t idx, output;
        size_t max = eip->zei_range_count;

        struct zei_ranges *r = eip->zei_ranges;

        ASSERT3U(eip->zei_range_count, >, 0);
        ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);

        output = idx = 0;
        while (idx < max - 1) {
                uint32_t start = r[idx].zr_start;
                uint32_t end = r[idx].zr_end;

                while (idx < max - 1) {
                        idx++;

                        uint32_t nstart = r[idx].zr_start;
                        uint32_t nend = r[idx].zr_end;

                        uint32_t gap = nstart - end;
                        if (gap < new_allowed_gap) {
                                end = nend;
                                continue;
                        }
                        if (gap < mingap)
                                mingap = gap;
                        break;
                }
                r[output].zr_start = start;
                r[output].zr_end = end;
                output++;
        }
        ASSERT3U(output, <, eip->zei_range_count);
        eip->zei_range_count = output;
        eip->zei_mingap = mingap;
        eip->zei_allowed_mingap = new_allowed_gap;
}
static void
zei_add_range(zfs_ecksum_info_t *eip, int start, int end)
{
        struct zei_ranges *r = eip->zei_ranges;
        size_t count = eip->zei_range_count;

        if (count >= MAX_RANGES) {
                zei_shrink_ranges(eip);
                count = eip->zei_range_count;
        }
        if (count == 0) {
                eip->zei_mingap = UINT32_MAX;
                eip->zei_allowed_mingap = 1;
        } else {
                int gap = start - r[count - 1].zr_end;

                if (gap < eip->zei_allowed_mingap) {
                        r[count - 1].zr_end = end;
                        return;
                }
                if (gap < eip->zei_mingap)
                        eip->zei_mingap = gap;
        }
        r[count].zr_start = start;
        r[count].zr_end = end;
        eip->zei_range_count++;
}
static size_t
zei_range_total_size(zfs_ecksum_info_t *eip)
{
        struct zei_ranges *r = eip->zei_ranges;
        size_t count = eip->zei_range_count;
        size_t result = 0;
        size_t idx;

        for (idx = 0; idx < count; idx++)
                result += (r[idx].zr_end - r[idx].zr_start);

        return (result);
}
static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
    const abd_t *goodabd, const abd_t *badabd, size_t size,
    boolean_t drop_if_identical)
{
        const uint64_t *good;
        const uint64_t *bad;

        size_t nui64s = size / sizeof (uint64_t);

        size_t inline_size;
        int no_inline = 0;
        size_t idx;
        size_t range;

        size_t offset = 0;
        ssize_t start = -1;

        zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_SLEEP);

        /* don't do any annotation for injected checksum errors */
        if (info != NULL && info->zbc_injected)
                return (eip);

        if (info != NULL && info->zbc_has_cksum) {
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
                    DATA_TYPE_UINT64_ARRAY,
                    sizeof (info->zbc_expected) / sizeof (uint64_t),
                    (uint64_t *)&info->zbc_expected,
                    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
                    DATA_TYPE_UINT64_ARRAY,
                    sizeof (info->zbc_actual) / sizeof (uint64_t),
                    (uint64_t *)&info->zbc_actual,
                    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
                    DATA_TYPE_STRING,
                    info->zbc_checksum_name,
                    NULL);

                if (info->zbc_byteswapped) {
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
                            DATA_TYPE_BOOLEAN, 1,
                            NULL);
                }
        }

        if (badabd == NULL || goodabd == NULL)
                return (eip);

        ASSERT3U(nui64s, <=, UINT32_MAX);
        ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
        ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
        ASSERT3U(size, <=, UINT32_MAX);

        good = (const uint64_t *) abd_borrow_buf_copy((abd_t *)goodabd, size);
        bad = (const uint64_t *) abd_borrow_buf_copy((abd_t *)badabd, size);

        /* build up the range list by comparing the two buffers. */
        for (idx = 0; idx < nui64s; idx++) {
                if (good[idx] == bad[idx]) {
                        if (start == -1)
                                continue;

                        zei_add_range(eip, start, idx);
                        start = -1;
                } else {
                        if (start != -1)
                                continue;

                        start = idx;
                }
        }
        if (start != -1)
                zei_add_range(eip, start, idx);

        /* See if it will fit in our inline buffers */
        inline_size = zei_range_total_size(eip);
        if (inline_size > ZFM_MAX_INLINE)
                no_inline = 1;

        /*
         * If there is no change and we want to drop if the buffers are
         * identical, do so.
         */
        if (inline_size == 0 && drop_if_identical) {
                kmem_free(eip, sizeof (*eip));
                abd_return_buf((abd_t *)goodabd, (void *)good, size);
                abd_return_buf((abd_t *)badabd, (void *)bad, size);
                return (NULL);
        }

        /*
         * Now walk through the ranges, filling in the details of the
         * differences.  Also convert our uint64_t-array offsets to byte
         * offsets.
         */
        for (range = 0; range < eip->zei_range_count; range++) {
                size_t start = eip->zei_ranges[range].zr_start;
                size_t end = eip->zei_ranges[range].zr_end;

                for (idx = start; idx < end; idx++) {
                        uint64_t set, cleared;

                        // bits set in bad, but not in good
                        set = ((~good[idx]) & bad[idx]);
                        // bits set in good, but not in bad
                        cleared = (good[idx] & (~bad[idx]));

                        if (!no_inline) {
                                ASSERT3U(offset, <, inline_size);
                                eip->zei_bits_set[offset] = set;
                                eip->zei_bits_cleared[offset] = cleared;
                                offset++;
                        }

                        update_histogram(set, eip->zei_histogram_set,
                            &eip->zei_range_sets[range]);
                        update_histogram(cleared, eip->zei_histogram_cleared,
                            &eip->zei_range_clears[range]);
                }

                /* convert to byte offsets */
                eip->zei_ranges[range].zr_start *= sizeof (uint64_t);
                eip->zei_ranges[range].zr_end *= sizeof (uint64_t);
        }

        abd_return_buf((abd_t *)goodabd, (void *)good, size);
        abd_return_buf((abd_t *)badabd, (void *)bad, size);

        eip->zei_allowed_mingap *= sizeof (uint64_t);
        inline_size *= sizeof (uint64_t);

        /* fill in ereport */
        fm_payload_set(ereport,
            FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
            DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
            (uint32_t *)eip->zei_ranges,
            FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
            DATA_TYPE_UINT32, eip->zei_allowed_mingap,
            FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
            DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
            FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
            DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
            NULL);

        if (!no_inline) {
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
                    DATA_TYPE_UINT8_ARRAY,
                    inline_size, (uint8_t *)eip->zei_bits_set,
                    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
                    DATA_TYPE_UINT8_ARRAY,
                    inline_size, (uint8_t *)eip->zei_bits_cleared,
                    NULL);
        } else {
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
                    DATA_TYPE_UINT32_ARRAY,
                    NBBY * sizeof (uint64_t), eip->zei_histogram_set,
                    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
                    DATA_TYPE_UINT32_ARRAY,
                    NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
                    NULL);
        }

        return (eip);
}
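
/*
 * Illustrative sketch of the per-word difference computed above
 * (hypothetical buffers, not part of this file): for a good word 0xF0 and
 * a bad word 0x0F, 'set' is 0x0F (bits gained) and 'cleared' is 0xF0
 * (bits lost); popcount(set | cleared) is this word's Hamming distance.
 */
#if 0
	uint64_t good = 0xF0, bad = 0x0F;
	uint64_t set = (~good) & bad;		/* 0x0F */
	uint64_t cleared = good & (~bad);	/* 0xF0 */
#endif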
#else
void
zfs_ereport_clear(spa_t *spa, vdev_t *vd)
{
        (void) spa, (void) vd;
}
#endif
/*
 * Make sure our event is still valid for the given zio/vdev/pool.  For
 * example, we don't want to keep logging events for a faulted or missing
 * vdev.
 */
boolean_t
zfs_ereport_is_valid(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio)
{
#ifdef _KERNEL
        /*
         * If we are doing a spa_tryimport() or in recovery mode,
         * ignore errors.
         */
        if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT ||
            spa_load_state(spa) == SPA_LOAD_RECOVER)
                return (B_FALSE);

        /*
         * If we are in the middle of opening a pool, and the previous attempt
         * failed, don't bother logging any new ereports - we're just going to
         * get the same diagnosis anyway.
         */
        if (spa_load_state(spa) != SPA_LOAD_NONE &&
            spa->spa_last_open_failed)
                return (B_FALSE);

        if (zio != NULL) {
                /*
                 * If this is not a read or write zio, ignore the error.  This
                 * can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
                 */
                if (zio->io_type != ZIO_TYPE_READ &&
                    zio->io_type != ZIO_TYPE_WRITE)
                        return (B_FALSE);

                if (vd != NULL) {
                        /*
                         * If the vdev has already been marked as failing due
                         * to a failed probe, then ignore any subsequent I/O
                         * errors, as the DE will automatically fault the vdev
                         * on the first such failure.  This also catches cases
                         * where vdev_remove_wanted is set and the device has
                         * not yet been asynchronously placed into the REMOVED
                         * state.
                         */
                        if (zio->io_vd == vd && !vdev_accessible(vd, zio))
                                return (B_FALSE);

                        /*
                         * Ignore checksum errors for reads from DTL regions of
                         * leaf vdevs.
                         */
                        if (zio->io_type == ZIO_TYPE_READ &&
                            zio->io_error == ECKSUM &&
                            vd->vdev_ops->vdev_op_leaf &&
                            vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
                                return (B_FALSE);
                }
        }

        /*
         * For probe failure, we want to avoid posting ereports if we've
         * already removed the device in the meantime.
         */
        if (vd != NULL &&
            strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) == 0 &&
            (vd->vdev_remove_wanted || vd->vdev_state == VDEV_STATE_REMOVED))
                return (B_FALSE);

        /* Ignore bogus delay events (like from ioctls or unqueued IOs) */
        if ((strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) &&
            (zio != NULL) && (!zio->io_timestamp)) {
                return (B_FALSE);
        }
#else
        (void) subclass, (void) spa, (void) vd, (void) zio;
#endif
        return (B_TRUE);
}
/*
 * Post an ereport for the given subclass
 *
 * Returns
 * - 0 if an event was posted
 * - EINVAL if there was a problem posting event
 * - EBUSY if the event was rate limited
 * - EALREADY if the event was already posted (duplicate)
 */
int
zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd,
    const zbookmark_phys_t *zb, zio_t *zio, uint64_t state)
{
        int rc = 0;
#ifdef _KERNEL
        nvlist_t *ereport = NULL;
        nvlist_t *detector = NULL;

        if (!zfs_ereport_is_valid(subclass, spa, vd, zio))
                return (0);

        if (zfs_ereport_is_duplicate(subclass, spa, vd, zb, zio, 0, 0))
                return (SET_ERROR(EALREADY));

        if (zfs_is_ratelimiting_event(subclass, vd))
                return (SET_ERROR(EBUSY));

        if (!zfs_ereport_start(&ereport, &detector, subclass, spa, vd,
            zb, zio, state, 0))
                return (SET_ERROR(EINVAL));	/* couldn't post event */

        if (ereport == NULL)
                return (SET_ERROR(EINVAL));

        /* Cleanup is handled by the callback function */
        rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
#else
        (void) subclass, (void) spa, (void) vd, (void) zb, (void) zio,
            (void) state;
#endif
        return (rc);
}
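
/*
 * Illustrative sketch of a caller (hypothetical, not part of this file):
 * an I/O error path posting an ereport.  Real callers live in zio.c and
 * the vdev code; the return value distinguishes posted (0), rate limited
 * (EBUSY), and duplicate (EALREADY) events.
 */
#if 0
	if (zio->io_error != 0)
		(void) zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, zio->io_vd,
		    &zio->io_bookmark, zio, 0);
#endif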
/*
 * Prepare a checksum ereport
 *
 * Returns
 * - 0 if an event was posted
 * - EINVAL if there was a problem posting event
 * - EBUSY if the event was rate limited
 * - EALREADY if the event was already posted (duplicate)
 */
int
zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
    struct zio *zio, uint64_t offset, uint64_t length, zio_bad_cksum_t *info)
{
        zio_cksum_report_t *report;

#ifdef _KERNEL
        if (!zfs_ereport_is_valid(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio))
                return (SET_ERROR(EINVAL));

        if (zfs_ereport_is_duplicate(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio,
            offset, length))
                return (SET_ERROR(EALREADY));

        if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
                return (SET_ERROR(EBUSY));
#else
        (void) zb, (void) offset;
#endif

        report = kmem_zalloc(sizeof (*report), KM_SLEEP);

        zio_vsd_default_cksum_report(zio, report);

        /* copy the checksum failure information if it was provided */
        if (info != NULL) {
                report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP);
                bcopy(info, report->zcr_ckinfo, sizeof (*info));
        }

        report->zcr_sector = 1ULL << vd->vdev_top->vdev_ashift;
        report->zcr_align =
            vdev_psize_to_asize(vd->vdev_top, report->zcr_sector);
        report->zcr_length = length;

#ifdef _KERNEL
        (void) zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector,
            FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio, offset, length);

        if (report->zcr_ereport == NULL) {
                zfs_ereport_free_checksum(report);
                return (0);
        }
#endif

        mutex_enter(&spa->spa_errlist_lock);
        report->zcr_next = zio->io_logical->io_cksum_report;
        zio->io_logical->io_cksum_report = report;
        mutex_exit(&spa->spa_errlist_lock);
        return (0);
}
void
zfs_ereport_finish_checksum(zio_cksum_report_t *report, const abd_t *good_data,
    const abd_t *bad_data, boolean_t drop_if_identical)
{
#ifdef _KERNEL
        zfs_ecksum_info_t *info;

        info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
            good_data, bad_data, report->zcr_length, drop_if_identical);
        if (info != NULL)
                zfs_zevent_post(report->zcr_ereport,
                    report->zcr_detector, zfs_zevent_post_cb);
        else
                zfs_zevent_post_cb(report->zcr_ereport, report->zcr_detector);

        report->zcr_ereport = report->zcr_detector = NULL;

        if (info != NULL)
                kmem_free(info, sizeof (*info));
#else
        (void) report, (void) good_data, (void) bad_data,
            (void) drop_if_identical;
#endif
}
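
/*
 * Illustrative sketch of the deferred checksum ereport lifecycle described
 * at the top of this file (hypothetical fragment, not part of this file):
 * the error path starts a report, and the logical I/O's completion path
 * finishes it with the good/bad buffers once both are available.
 */
#if 0
	/* at error detection time */
	(void) zfs_ereport_start_checksum(spa, vd, &zio->io_bookmark, zio,
	    offset, length, &zbc);
	/* at logical I/O completion, for each pending report */
	zfs_ereport_finish_checksum(report, good_abd, bad_abd, B_TRUE);
#endif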
void
zfs_ereport_free_checksum(zio_cksum_report_t *rpt)
{
#ifdef _KERNEL
        if (rpt->zcr_ereport != NULL) {
                fm_nvlist_destroy(rpt->zcr_ereport,
                    FM_NVA_FREE);
                fm_nvlist_destroy(rpt->zcr_detector,
                    FM_NVA_FREE);
        }
#endif
        rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo);

        if (rpt->zcr_ckinfo != NULL)
                kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo));

        kmem_free(rpt, sizeof (*rpt));
}
/*
 * Post a checksum ereport
 *
 * Returns
 * - 0 if an event was posted
 * - EINVAL if there was a problem posting event
 * - EBUSY if the event was rate limited
 * - EALREADY if the event was already posted (duplicate)
 */
int
zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
    struct zio *zio, uint64_t offset, uint64_t length,
    const abd_t *good_data, const abd_t *bad_data, zio_bad_cksum_t *zbc)
{
        int rc = 0;
#ifdef _KERNEL
        nvlist_t *ereport = NULL;
        nvlist_t *detector = NULL;
        zfs_ecksum_info_t *info;

        if (!zfs_ereport_is_valid(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio))
                return (SET_ERROR(EINVAL));

        if (zfs_ereport_is_duplicate(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio,
            offset, length))
                return (SET_ERROR(EALREADY));

        if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
                return (SET_ERROR(EBUSY));

        if (!zfs_ereport_start(&ereport, &detector, FM_EREPORT_ZFS_CHECKSUM,
            spa, vd, zb, zio, offset, length) || (ereport == NULL)) {
                return (SET_ERROR(EINVAL));
        }

        info = annotate_ecksum(ereport, zbc, good_data, bad_data, length,
            B_FALSE);

        if (info != NULL) {
                rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
                kmem_free(info, sizeof (*info));
        }
#else
        (void) spa, (void) vd, (void) zb, (void) zio, (void) offset,
            (void) length, (void) good_data, (void) bad_data, (void) zbc;
#endif
        return (rc);
}
/*
 * The 'sysevent.fs.zfs.*' events are signals posted to notify user space of
 * change in the pool.  All sysevents are listed in sys/sysevent/eventdefs.h
 * and are designed to be consumed by the ZFS Event Daemon (ZED).  For
 * additional details refer to the zed(8) man page.
 */
static nvlist_t *
zfs_event_create(spa_t *spa, vdev_t *vd, const char *type, const char *name,
    nvlist_t *aux)
{
        nvlist_t *resource = NULL;
#ifdef _KERNEL
        char class[64];

        if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
                return (NULL);

        if ((resource = fm_nvlist_create(NULL)) == NULL)
                return (NULL);

        (void) snprintf(class, sizeof (class), "%s.%s.%s", type,
            ZFS_ERROR_CLASS, name);
        VERIFY0(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION));
        VERIFY0(nvlist_add_string(resource, FM_CLASS, class));
        VERIFY0(nvlist_add_string(resource,
            FM_EREPORT_PAYLOAD_ZFS_POOL, spa_name(spa)));
        VERIFY0(nvlist_add_uint64(resource,
            FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)));
        VERIFY0(nvlist_add_uint64(resource,
            FM_EREPORT_PAYLOAD_ZFS_POOL_STATE, spa_state(spa)));
        VERIFY0(nvlist_add_int32(resource,
            FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, spa_load_state(spa)));

        if (vd) {
                VERIFY0(nvlist_add_uint64(resource,
                    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid));
                VERIFY0(nvlist_add_uint64(resource,
                    FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, vd->vdev_state));
                if (vd->vdev_path != NULL)
                        VERIFY0(nvlist_add_string(resource,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH, vd->vdev_path));
                if (vd->vdev_devid != NULL)
                        VERIFY0(nvlist_add_string(resource,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID, vd->vdev_devid));
                if (vd->vdev_fru != NULL)
                        VERIFY0(nvlist_add_string(resource,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU, vd->vdev_fru));
                if (vd->vdev_enc_sysfs_path != NULL)
                        VERIFY0(nvlist_add_string(resource,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
                            vd->vdev_enc_sysfs_path));
        }

        /* also copy any optional payload data */
        if (aux) {
                nvpair_t *elem = NULL;

                while ((elem = nvlist_next_nvpair(aux, elem)) != NULL)
                        (void) nvlist_add_nvpair(resource, elem);
        }
#else
        (void) spa, (void) vd, (void) type, (void) name, (void) aux;
#endif
        return (resource);
}
static void
zfs_post_common(spa_t *spa, vdev_t *vd, const char *type, const char *name,
    nvlist_t *aux)
{
#ifdef _KERNEL
        nvlist_t *resource;

        resource = zfs_event_create(spa, vd, type, name, aux);
        if (resource)
                zfs_zevent_post(resource, NULL, zfs_zevent_post_cb);
#else
        (void) spa, (void) vd, (void) type, (void) name, (void) aux;
#endif
}
/*
 * The 'resource.fs.zfs.removed' event is an internal signal that the given
 * vdev has been removed from the system.  This will cause the DE to ignore
 * any recent I/O errors, inferring that they are due to the asynchronous
 * device removal.
 */
void
zfs_post_remove(spa_t *spa, vdev_t *vd)
{
        zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_REMOVED, NULL);
}
/*
 * The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool
 * has the 'autoreplace' property set, and therefore any broken vdevs will be
 * handled by higher level logic, and no vdev fault should be generated.
 */
void
zfs_post_autoreplace(spa_t *spa, vdev_t *vd)
{
        zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_AUTOREPLACE, NULL);
}
/*
 * The 'resource.fs.zfs.statechange' event is an internal signal that the
 * given vdev has transitioned its state to DEGRADED or HEALTHY.  This will
 * cause the retire agent to repair any outstanding fault management cases
 * open because the device was not found (fault.fs.zfs.device).
 */
void
zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate)
{
#ifdef _KERNEL
        nvlist_t *aux;

        /*
         * Add optional supplemental keys to payload
         */
        aux = fm_nvlist_create(NULL);
        if (vd && aux) {
                if (vd->vdev_physpath) {
                        (void) nvlist_add_string(aux,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_PHYSPATH,
                            vd->vdev_physpath);
                }
                if (vd->vdev_enc_sysfs_path) {
                        (void) nvlist_add_string(aux,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
                            vd->vdev_enc_sysfs_path);
                }

                (void) nvlist_add_uint64(aux,
                    FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE, laststate);
        }

        zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_STATECHANGE,
            aux);

        if (aux)
                fm_nvlist_destroy(aux, FM_NVA_FREE);
#else
        (void) spa, (void) vd, (void) laststate;
#endif
}
#ifdef _KERNEL
void
zfs_ereport_init(void)
{
        mutex_init(&recent_events_lock, NULL, MUTEX_DEFAULT, NULL);
        list_create(&recent_events_list, sizeof (recent_events_node_t),
            offsetof(recent_events_node_t, re_list_link));
        avl_create(&recent_events_tree, recent_events_compare,
            sizeof (recent_events_node_t), offsetof(recent_events_node_t,
            re_tree_link));
}
/*
 * This 'early' fini needs to run before zfs_fini() which on Linux waits
 * for the system_delay_taskq to drain.
 */
void
zfs_ereport_taskq_fini(void)
{
        mutex_enter(&recent_events_lock);
        if (recent_events_cleaner_tqid != 0) {
                taskq_cancel_id(system_delay_taskq,
                    recent_events_cleaner_tqid);
                recent_events_cleaner_tqid = 0;
        }
        mutex_exit(&recent_events_lock);
}
void
zfs_ereport_fini(void)
{
        recent_events_node_t *entry;

        while ((entry = list_head(&recent_events_list)) != NULL) {
                avl_remove(&recent_events_tree, entry);
                list_remove(&recent_events_list, entry);
                kmem_free(entry, sizeof (*entry));
        }
        avl_destroy(&recent_events_tree);
        list_destroy(&recent_events_list);
        mutex_destroy(&recent_events_lock);
}
void
zfs_ereport_snapshot_post(const char *subclass, spa_t *spa, const char *name)
{
        nvlist_t *aux;

        aux = fm_nvlist_create(NULL);
        nvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_SNAPSHOT_NAME, name);

        zfs_post_common(spa, NULL, FM_RSRC_CLASS, subclass, aux);
        fm_nvlist_destroy(aux, FM_NVA_FREE);
}
/*
 * Post an event when a zvol is created or removed
 *
 * This is currently only used by macOS, since it uses the event to create
 * symlinks between the volume name (mypool/myvol) and the actual /dev
 * device (/dev/disk3).  For example:
 *
 * /var/run/zfs/dsk/mypool/myvol -> /dev/disk3
 *
 * name: The full name of the zvol ("mypool/myvol")
 * dev_name: The full /dev name for the zvol ("/dev/disk3")
 * raw_name: The raw /dev name for the zvol ("/dev/rdisk3")
 */
void
zfs_ereport_zvol_post(const char *subclass, const char *name,
    const char *dev_name, const char *raw_name)
{
        nvlist_t *aux;
        char *r;

        boolean_t locked = mutex_owned(&spa_namespace_lock);
        if (!locked) mutex_enter(&spa_namespace_lock);
        spa_t *spa = spa_lookup(name);
        if (!locked) mutex_exit(&spa_namespace_lock);

        if (spa == NULL)
                return;

        aux = fm_nvlist_create(NULL);
        nvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_DEVICE_NAME, dev_name);
        nvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_RAW_DEVICE_NAME,
            raw_name);
        r = strchr(name, '/');
        if (r && r[1])
                nvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_VOLUME, &r[1]);

        zfs_post_common(spa, NULL, FM_RSRC_CLASS, subclass, aux);
        fm_nvlist_destroy(aux, FM_NVA_FREE);
}
EXPORT_SYMBOL(zfs_ereport_post);
EXPORT_SYMBOL(zfs_ereport_is_valid);
EXPORT_SYMBOL(zfs_ereport_post_checksum);
EXPORT_SYMBOL(zfs_post_remove);
EXPORT_SYMBOL(zfs_post_autoreplace);
EXPORT_SYMBOL(zfs_post_state_change);
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, retain_max, UINT, ZMOD_RW,
        "Maximum recent zevents records to retain for duplicate checking");
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, retain_expire_secs, UINT, ZMOD_RW,
        "Expiration time for recent zevents records");

#endif /* _KERNEL */