/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_checksum.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/sysevent.h>
/*
 * This general routine is responsible for generating all the different ZFS
 * ereports.  The payload is dependent on the class, and which arguments are
 * supplied to the function:
 *
 *	EREPORT			POOL	VDEV	IO
 *
 * If we are in a loading state, all errors are chained together by the same
 * SPA-wide ENA (Error Numeric Association).
 *
 * For isolated I/O requests, we get the ENA from the zio_t.  The propagation
 * gets very complicated due to RAID-Z, gang blocks, and vdev caching.  We want
 * to chain together all ereports associated with a logical piece of data.  For
 * read I/Os, there are basically three 'types' of I/O, which form a roughly
 * layered diagram:
 *
 *	+---------------+
 *	| Aggregate I/O |	No associated logical data or device
 *	+---------------+
 *	        |
 *	        V
 *	+---------------+	Reads associated with a piece of logical data.
 *	|   Read I/O    |	This includes reads on behalf of RAID-Z,
 *	+---------------+	mirrors, gang blocks, retries, etc.
 *	        |
 *	        V
 *	+---------------+	Reads associated with a particular device, but
 *	|  Physical I/O |	no logical data.  Issued as part of vdev caching
 *	+---------------+	and I/O aggregation.
 *
 * Note that 'physical I/O' here is not the same terminology as used in the
 * rest of ZIO.  Typically, 'physical I/O' simply means that there is no
 * attached block pointer.  But I/O with no associated block pointer can still
 * be related to a logical piece of data (i.e. RAID-Z requests).
 *
 * Purely physical I/Os always have unique ENAs.  They are not related to a
 * particular piece of logical data, and therefore cannot be chained together.
 * We still generate an ereport, but the DE doesn't correlate it with any
 * logical piece of data.  When such an I/O fails, the delegated I/O requests
 * will issue a retry, which will trigger the 'real' ereport with the correct
 * ENA.
 *
 * We keep track of the ENA for a ZIO chain through the 'io_logical' member.
 * When a new logical I/O is issued, we set this to point to itself.  Child I/Os
 * then inherit this pointer, so that when it is first set subsequent failures
 * will use the same ENA.  For vdev cache fill and queue aggregation I/O,
 * this pointer is set to NULL, and no ereport will be generated (since it
 * doesn't actually correspond to any particular device or piece of data,
 * and the caller will always retry without caching or queueing anyway).
 *
 * For checksum errors, we want to include more information about the actual
 * error which occurs.  Accordingly, we build an ereport when the error is
 * noticed, but instead of sending it in immediately, we hang it off of the
 * io_cksum_report field of the logical IO.  When the logical IO completes
 * (successfully or not), zfs_ereport_finish_checksum() is called with the
 * good and bad versions of the buffer (if available), and we annotate the
 * ereport with information about the differences.
 */
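/*
 * Usage sketch (illustrative, not from the original file): a caller in the
 * I/O path posts an ereport from the failing zio's context, e.g.
 *
 *	zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);
 *
 * where 'spa', 'vd', and 'zio' come from that context.  zfs_ereport_start()
 * below turns this into an "ereport.fs.zfs.io" event and fills in the pool,
 * vdev, and zio payload members.
 */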
static void
zfs_zevent_post_cb(nvlist_t *nvl, nvlist_t *detector)
{
	if (nvl != NULL)
		fm_nvlist_destroy(nvl, FM_NVA_FREE);

	if (detector != NULL)
		fm_nvlist_destroy(detector, FM_NVA_FREE);
}
/*
 * We want to rate limit ZIO delay and checksum events so as to not
 * flood ZED when a disk is acting up.
 *
 * Returns 1 if we're ratelimiting, 0 if not.
 */
static int
zfs_is_ratelimiting_event(const char *subclass, vdev_t *vd)
{
	int rc = 0;
	/*
	 * zfs_ratelimit() returns 1 if we're *not* ratelimiting and 0 if we
	 * are.  Invert it to get our return value.
	 */
	if (strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) {
		rc = !zfs_ratelimit(&vd->vdev_delay_rl);
	} else if (strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0) {
		rc = !zfs_ratelimit(&vd->vdev_checksum_rl);
	}

	if (rc) {
		/* We're rate limiting */
		fm_erpt_dropped_increment();
	}

	return (rc);
}
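/*
 * Illustrative caller pattern (a sketch; zfs_ereport_post() below does
 * exactly this): check the limiter first and drop the event when it fires,
 * so a misbehaving disk cannot flood ZED:
 *
 *	if (zfs_is_ratelimiting_event(subclass, vd))
 *		return;
 */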
static void
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
    const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
	nvlist_t *ereport, *detector;

	uint64_t ena;
	char class[64];

	/*
	 * If we are doing a spa_tryimport() or in recovery mode,
	 * ignore errors.
	 */
	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT ||
	    spa_load_state(spa) == SPA_LOAD_RECOVER)
		return;

	/*
	 * If we are in the middle of opening a pool, and the previous attempt
	 * failed, don't bother logging any new ereports - we're just going to
	 * get the same diagnosis anyway.
	 */
	if (spa_load_state(spa) != SPA_LOAD_NONE &&
	    spa->spa_last_open_failed)
		return;
	if (zio != NULL) {
		/*
		 * If this is not a read or write zio, ignore the error.  This
		 * can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
		 */
		if (zio->io_type != ZIO_TYPE_READ &&
		    zio->io_type != ZIO_TYPE_WRITE)
			return;

		if (vd != NULL) {
			/*
			 * If the vdev has already been marked as failing due
			 * to a failed probe, then ignore any subsequent I/O
			 * errors, as the DE will automatically fault the vdev
			 * on the first such failure.  This also catches cases
			 * where vdev_remove_wanted is set and the device has
			 * not yet been asynchronously placed into the REMOVED
			 * state.
			 */
			if (zio->io_vd == vd && !vdev_accessible(vd, zio))
				return;

			/*
			 * Ignore checksum errors for reads from DTL regions of
			 * leaf vdevs.
			 */
			if (zio->io_type == ZIO_TYPE_READ &&
			    zio->io_error == ECKSUM &&
			    vd->vdev_ops->vdev_op_leaf &&
			    vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
				return;
		}
	}

	/*
	 * For probe failure, we want to avoid posting ereports if we've
	 * already removed the device in the meantime.
	 */
	if (vd != NULL &&
	    strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) == 0 &&
	    (vd->vdev_remove_wanted || vd->vdev_state == VDEV_STATE_REMOVED))
		return;

	if ((strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) &&
	    (zio != NULL) && (!zio->io_timestamp)) {
		/* Ignore bogus delay events */
		return;
	}
	if ((ereport = fm_nvlist_create(NULL)) == NULL)
		return;

	if ((detector = fm_nvlist_create(NULL)) == NULL) {
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		return;
	}

	/*
	 * Serialize ereport generation
	 */
	mutex_enter(&spa->spa_errlist_lock);
	/*
	 * Determine the ENA to use for this event.  If we are in a loading
	 * state, use a SPA-wide ENA.  Otherwise, if we are in an I/O state, use
	 * a root zio-wide ENA.  Otherwise, simply use a unique ENA.
	 */
	if (spa_load_state(spa) != SPA_LOAD_NONE) {
		if (spa->spa_ena == 0)
			spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
		ena = spa->spa_ena;
	} else if (zio != NULL && zio->io_logical != NULL) {
		if (zio->io_logical->io_ena == 0)
			zio->io_logical->io_ena =
			    fm_ena_generate(0, FM_ENA_FMT1);
		ena = zio->io_logical->io_ena;
	} else {
		ena = fm_ena_generate(0, FM_ENA_FMT1);
	}
	/*
	 * Construct the full class, detector, and other standard FMA fields.
	 */
	(void) snprintf(class, sizeof (class), "%s.%s",
	    ZFS_ERROR_CLASS, subclass);

	fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa),
	    vd != NULL ? vd->vdev_guid : 0);

	fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL);

	/*
	 * Construct the per-ereport payload, depending on which parameters are
	 * passed in.
	 */

	/*
	 * Generic payload members common to all ereports.
	 */
	fm_payload_set(ereport,
	    FM_EREPORT_PAYLOAD_ZFS_POOL, DATA_TYPE_STRING, spa_name(spa),
	    FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, DATA_TYPE_UINT64, spa_guid(spa),
	    FM_EREPORT_PAYLOAD_ZFS_POOL_STATE, DATA_TYPE_UINT64,
	    (uint64_t)spa_state(spa),
	    FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32,
	    (int32_t)spa_load_state(spa), NULL);

	fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
	    DATA_TYPE_STRING,
	    spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
	    FM_EREPORT_FAILMODE_WAIT :
	    spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
	    FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC,
	    NULL);
	if (vd != NULL) {
		vdev_t *pvd = vd->vdev_parent;
		vdev_queue_t *vq = &vd->vdev_queue;
		vdev_stat_t *vs = &vd->vdev_stat;
		vdev_t *spare_vd;
		uint64_t *spare_guids;
		char **spare_paths;
		int i, spare_count;

		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
		    DATA_TYPE_UINT64, vd->vdev_guid,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
		    DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL);
		if (vd->vdev_path != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
			    DATA_TYPE_STRING, vd->vdev_path, NULL);
		if (vd->vdev_devid != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID,
			    DATA_TYPE_STRING, vd->vdev_devid, NULL);
		if (vd->vdev_fru != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU,
			    DATA_TYPE_STRING, vd->vdev_fru, NULL);
		if (vd->vdev_enc_sysfs_path != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
			    DATA_TYPE_STRING, vd->vdev_enc_sysfs_path, NULL);

		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_ASHIFT,
		    DATA_TYPE_UINT64, vd->vdev_ashift, NULL);

		if (vq != NULL) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_COMP_TS,
			    DATA_TYPE_UINT64, vq->vq_io_complete_ts, NULL);
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_DELTA_TS,
			    DATA_TYPE_UINT64, vq->vq_io_delta_ts, NULL);
		}

		if (vs != NULL) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_READ_ERRORS,
			    DATA_TYPE_UINT64, vs->vs_read_errors,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_WRITE_ERRORS,
			    DATA_TYPE_UINT64, vs->vs_write_errors,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_ERRORS,
			    DATA_TYPE_UINT64, vs->vs_checksum_errors, NULL);
		}

		if (pvd != NULL) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID,
			    DATA_TYPE_UINT64, pvd->vdev_guid,
			    FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
			    DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type,
			    NULL);
			if (pvd->vdev_path != NULL)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
				    DATA_TYPE_STRING, pvd->vdev_path, NULL);
			if (pvd->vdev_devid != NULL)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
				    DATA_TYPE_STRING, pvd->vdev_devid, NULL);
		}

		spare_count = spa->spa_spares.sav_count;
		spare_paths = kmem_zalloc(sizeof (char *) * spare_count,
		    KM_SLEEP);
		spare_guids = kmem_zalloc(sizeof (uint64_t) * spare_count,
		    KM_SLEEP);

		for (i = 0; i < spare_count; i++) {
			spare_vd = spa->spa_spares.sav_vdevs[i];
			if (spare_vd) {
				spare_paths[i] = spare_vd->vdev_path;
				spare_guids[i] = spare_vd->vdev_guid;
			}
		}

		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_PATHS,
		    DATA_TYPE_STRING_ARRAY, spare_count, spare_paths,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_GUIDS,
		    DATA_TYPE_UINT64_ARRAY, spare_count, spare_guids, NULL);

		kmem_free(spare_guids, sizeof (uint64_t) * spare_count);
		kmem_free(spare_paths, sizeof (char *) * spare_count);
	}
	if (zio != NULL) {
		/*
		 * Payload common to all I/Os.
		 */
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
		    DATA_TYPE_INT32, zio->io_error, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS,
		    DATA_TYPE_INT32, zio->io_flags, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE,
		    DATA_TYPE_UINT32, zio->io_stage, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE,
		    DATA_TYPE_UINT32, zio->io_pipeline, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY,
		    DATA_TYPE_UINT64, zio->io_delay, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_TIMESTAMP,
		    DATA_TYPE_UINT64, zio->io_timestamp, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELTA,
		    DATA_TYPE_UINT64, zio->io_delta, NULL);

		/*
		 * If the 'size' parameter is non-zero, it indicates this is a
		 * RAID-Z or other I/O where the physical offset and length are
		 * provided for us, instead of within the zio_t.
		 */
		if (vd != NULL) {
			if (size)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
				    DATA_TYPE_UINT64, stateoroffset,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
				    DATA_TYPE_UINT64, size, NULL);
			else
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
				    DATA_TYPE_UINT64, zio->io_offset,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
				    DATA_TYPE_UINT64, zio->io_size, NULL);
		}
		/*
		 * Payload for I/Os with corresponding logical information.
		 */
		if (zio->io_logical != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_objset,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_object,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
			    DATA_TYPE_INT64,
			    zio->io_logical->io_bookmark.zb_level,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_blkid, NULL);
	} else if (vd != NULL) {
		/*
		 * If we have a vdev but no zio, this is a device fault, and the
		 * 'stateoroffset' parameter indicates the previous state of the
		 * vdev.
		 */
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
		    DATA_TYPE_UINT64, stateoroffset, NULL);
	}

	mutex_exit(&spa->spa_errlist_lock);

	*ereport_out = ereport;
	*detector_out = detector;
}
/* if it's <= 128 bytes, save the corruption directly */
#define	ZFM_MAX_INLINE		(128 / sizeof (uint64_t))

#define	MAX_RANGES		16
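/*
 * With 8-byte uint64_t words, ZFM_MAX_INLINE works out to 16: up to 16
 * corrupt words (128 bytes) are recorded verbatim in the ereport; larger
 * corruptions fall back to the histogram representation built below.
 */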
typedef struct zfs_ecksum_info {
	/* histograms of set and cleared bits by bit number in a 64-bit word */
	uint16_t zei_histogram_set[sizeof (uint64_t) * NBBY];
	uint16_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];

	/* inline arrays of bits set and cleared. */
	uint64_t zei_bits_set[ZFM_MAX_INLINE];
	uint64_t zei_bits_cleared[ZFM_MAX_INLINE];

	/*
	 * for each range, the number of bits set and cleared.  The Hamming
	 * distance between the good and bad buffers is the sum of them all.
	 */
	uint32_t zei_range_sets[MAX_RANGES];
	uint32_t zei_range_clears[MAX_RANGES];

	struct zei_ranges {
		uint32_t zr_start;
		uint32_t zr_end;
	} zei_ranges[MAX_RANGES];

	size_t zei_range_count;
	uint32_t zei_mingap;
	uint32_t zei_allowed_mingap;

} zfs_ecksum_info_t;
static void
update_histogram(uint64_t value_arg, uint16_t *hist, uint32_t *count)
{
	int i;
	int bits = 0;
	uint64_t value = BE_64(value_arg);

	/* We store the bits in big-endian (largest-first) order */
	for (i = 0; i < 64; i++) {
		if (value & (1ull << i)) {
			if (hist[63 - i] < UINT16_MAX)
				hist[63 - i]++;
			bits++;
		}
	}

	/* update the count of bits changed */
	*count += bits;
}
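/*
 * Worked example (a sketch): if 'value' has only bit 63 set after the
 * BE_64() conversion, the loop matches at i == 63 and bumps hist[0]; a
 * lone bit 0 would bump hist[63].  Either way *count grows by one, the
 * number of changed bits in this word.
 */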
/*
 * We've now filled up the range array, and need to increase "mingap" and
 * shrink the range list accordingly.  zei_mingap is always the smallest
 * distance between array entries, so we set the new_allowed_gap to be
 * one greater than that.  We then go through the list, joining together
 * any ranges which are closer than the new_allowed_gap.
 *
 * By construction, there will be at least one such pair.  We also update
 * zei_mingap to the new smallest gap, to prepare for our next invocation.
 */
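/*
 * Worked example (a sketch): with ranges [0,2) [3,5) [10,12) and
 * zei_mingap == 1, new_allowed_gap becomes 2.  The gap between [0,2) and
 * [3,5) is 1, so they merge into [0,5); [10,12) survives (gap 5), and
 * zei_mingap becomes 5 for the next invocation.
 */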
static void
zei_shrink_ranges(zfs_ecksum_info_t *eip)
{
	uint32_t mingap = UINT32_MAX;
	uint32_t new_allowed_gap = eip->zei_mingap + 1;

	size_t idx, output = 0;
	size_t max = eip->zei_range_count;

	struct zei_ranges *r = eip->zei_ranges;

	ASSERT3U(eip->zei_range_count, >, 0);
	ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);

	idx = 0;
	while (idx < max - 1) {
		uint32_t start = r[idx].zr_start;
		uint32_t end = r[idx].zr_end;

		while (idx < max - 1) {
			uint32_t nstart, nend, gap;

			idx++;
			nstart = r[idx].zr_start;
			nend = r[idx].zr_end;

			gap = nstart - end;
			if (gap < new_allowed_gap) {
				end = nend;
				continue;
			}
			if (gap < mingap)
				mingap = gap;
			break;
		}

		r[output].zr_start = start;
		r[output].zr_end = end;
		output++;
	}

	ASSERT3U(output, <, eip->zei_range_count);
	eip->zei_range_count = output;
	eip->zei_mingap = mingap;
	eip->zei_allowed_mingap = new_allowed_gap;
}
static void
zei_add_range(zfs_ecksum_info_t *eip, int start, int end)
{
	struct zei_ranges *r = eip->zei_ranges;
	size_t count = eip->zei_range_count;

	if (count >= MAX_RANGES) {
		zei_shrink_ranges(eip);
		count = eip->zei_range_count;
	}
	if (count == 0) {
		eip->zei_mingap = UINT32_MAX;
		eip->zei_allowed_mingap = 1;
	} else {
		int gap = start - r[count - 1].zr_end;

		if (gap < eip->zei_allowed_mingap) {
			r[count - 1].zr_end = end;
			return;
		}
		if (gap < eip->zei_mingap)
			eip->zei_mingap = gap;
	}

	r[count].zr_start = start;
	r[count].zr_end = end;
	eip->zei_range_count++;
}
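/*
 * Worked example (a sketch): after zei_add_range(eip, 0, 4), a subsequent
 * zei_add_range(eip, 4, 6) computes gap == 0, below the allowed mingap of
 * 1, so the existing range simply grows to [0,6) instead of consuming a
 * new slot.
 */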
static size_t
zei_range_total_size(zfs_ecksum_info_t *eip)
{
	struct zei_ranges *r = eip->zei_ranges;
	size_t count = eip->zei_range_count;
	size_t result = 0;
	size_t idx;

	for (idx = 0; idx < count; idx++)
		result += (r[idx].zr_end - r[idx].zr_start);

	return (result);
}
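/*
 * For example (a sketch): ranges [0,2) and [5,6) total (2-0) + (6-5) == 3
 * uint64_t words, which is the number of inline slots annotate_ecksum()
 * below needs in each of the bits-set and bits-cleared arrays.
 */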
static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
    const abd_t *goodabd, const abd_t *badabd, size_t size,
    boolean_t drop_if_identical)
{
	const uint64_t *good;
	const uint64_t *bad;

	uint64_t allset = 0;
	uint64_t allcleared = 0;

	size_t nui64s = size / sizeof (uint64_t);

	size_t inline_size;
	int no_inline = 0;
	size_t idx;
	size_t range;

	size_t offset = 0;
	ssize_t start = -1;

	zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_SLEEP);

	/* don't do any annotation for injected checksum errors */
	if (info != NULL && info->zbc_injected)
		return (eip);

	if (info != NULL && info->zbc_has_cksum) {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
		    DATA_TYPE_UINT64_ARRAY,
		    sizeof (info->zbc_expected) / sizeof (uint64_t),
		    (uint64_t *)&info->zbc_expected,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
		    DATA_TYPE_UINT64_ARRAY,
		    sizeof (info->zbc_actual) / sizeof (uint64_t),
		    (uint64_t *)&info->zbc_actual,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
		    DATA_TYPE_STRING,
		    info->zbc_checksum_name,
		    NULL);

		if (info->zbc_byteswapped) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
			    DATA_TYPE_BOOLEAN, 1,
			    NULL);
		}
	}

	if (badabd == NULL || goodabd == NULL)
		return (eip);

	ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(size, <=, UINT32_MAX);

	good = (const uint64_t *) abd_borrow_buf_copy((abd_t *)goodabd, size);
	bad = (const uint64_t *) abd_borrow_buf_copy((abd_t *)badabd, size);
	/* build up the range list by comparing the two buffers. */
	for (idx = 0; idx < nui64s; idx++) {
		if (good[idx] == bad[idx]) {
			if (start == -1)
				continue;

			zei_add_range(eip, start, idx);
			start = -1;
		} else {
			if (start != -1)
				continue;

			start = idx;
		}
	}
	if (start != -1)
		zei_add_range(eip, start, idx);

	/* See if it will fit in our inline buffers */
	inline_size = zei_range_total_size(eip);
	if (inline_size > ZFM_MAX_INLINE)
		no_inline = 1;

	/*
	 * If there is no change and we want to drop if the buffers are
	 * identical, do so.
	 */
	if (inline_size == 0 && drop_if_identical) {
		kmem_free(eip, sizeof (*eip));
		abd_return_buf((abd_t *)goodabd, (void *)good, size);
		abd_return_buf((abd_t *)badabd, (void *)bad, size);
		return (NULL);
	}
	/*
	 * Now walk through the ranges, filling in the details of the
	 * differences.  Also convert our uint64_t-array offsets to byte
	 * offsets.
	 */
	for (range = 0; range < eip->zei_range_count; range++) {
		size_t start = eip->zei_ranges[range].zr_start;
		size_t end = eip->zei_ranges[range].zr_end;

		for (idx = start; idx < end; idx++) {
			uint64_t set, cleared;

			// bits set in bad, but not in good
			set = ((~good[idx]) & bad[idx]);
			// bits set in good, but not in bad
			cleared = (good[idx] & (~bad[idx]));

			allset |= set;
			allcleared |= cleared;

			if (!no_inline) {
				ASSERT3U(offset, <, inline_size);
				eip->zei_bits_set[offset] = set;
				eip->zei_bits_cleared[offset] = cleared;
				offset++;
			}

			update_histogram(set, eip->zei_histogram_set,
			    &eip->zei_range_sets[range]);
			update_histogram(cleared, eip->zei_histogram_cleared,
			    &eip->zei_range_clears[range]);
		}

		/* convert to byte offsets */
		eip->zei_ranges[range].zr_start *= sizeof (uint64_t);
		eip->zei_ranges[range].zr_end *= sizeof (uint64_t);
	}

	abd_return_buf((abd_t *)goodabd, (void *)good, size);
	abd_return_buf((abd_t *)badabd, (void *)bad, size);

	eip->zei_allowed_mingap *= sizeof (uint64_t);
	inline_size *= sizeof (uint64_t);
	/* fill in ereport */
	fm_payload_set(ereport,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
	    DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
	    (uint32_t *)eip->zei_ranges,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
	    DATA_TYPE_UINT32, eip->zei_allowed_mingap,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
	    DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
	    DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
	    NULL);

	if (!no_inline) {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
		    DATA_TYPE_UINT8_ARRAY,
		    inline_size, (uint8_t *)eip->zei_bits_set,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
		    DATA_TYPE_UINT8_ARRAY,
		    inline_size, (uint8_t *)eip->zei_bits_cleared,
		    NULL);
	} else {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
		    DATA_TYPE_UINT16_ARRAY,
		    NBBY * sizeof (uint64_t), eip->zei_histogram_set,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
		    DATA_TYPE_UINT16_ARRAY,
		    NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
		    NULL);
	}
	return (eip);
}
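/*
 * Worked example (a sketch): if good[idx] == 0xF0 and bad[idx] == 0x0F,
 * then set == 0x0F (bits newly on in the bad copy) and cleared == 0xF0
 * (bits lost from the good copy); four bits are counted in each histogram
 * and the range's Hamming distance contribution is 8.
 */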
void
zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
	nvlist_t *ereport = NULL;
	nvlist_t *detector = NULL;

	if (zfs_is_ratelimiting_event(subclass, vd))
		return;

	zfs_ereport_start(&ereport, &detector,
	    subclass, spa, vd, zio, stateoroffset, size);

	if (ereport == NULL)
		return;

	/* Cleanup is handled by the callback function */
	zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
}
void
zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
    struct zio *zio, uint64_t offset, uint64_t length, void *arg,
    zio_bad_cksum_t *info)
{
	zio_cksum_report_t *report;

	if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
		return;

	report = kmem_zalloc(sizeof (*report), KM_SLEEP);

	if (zio->io_vsd != NULL)
		zio->io_vsd_ops->vsd_cksum_report(zio, report, arg);
	else
		zio_vsd_default_cksum_report(zio, report, arg);

	/* copy the checksum failure information if it was provided */
	if (info != NULL) {
		report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP);
		bcopy(info, report->zcr_ckinfo, sizeof (*info));
	}

	report->zcr_align = 1ULL << vd->vdev_top->vdev_ashift;
	report->zcr_length = length;

	zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector,
	    FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);

	if (report->zcr_ereport == NULL) {
		zfs_ereport_free_checksum(report);
		return;
	}

	mutex_enter(&spa->spa_errlist_lock);
	report->zcr_next = zio->io_logical->io_cksum_report;
	zio->io_logical->io_cksum_report = report;
	mutex_exit(&spa->spa_errlist_lock);
}
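/*
 * Illustrative flow (a sketch matching the description at the top of this
 * file): the report created above stays chained off io_cksum_report until
 * the logical I/O completes, whose done path then calls something like
 *
 *	zfs_ereport_finish_checksum(report, good_abd, bad_abd, B_TRUE);
 *
 * where 'good_abd' and 'bad_abd' are hypothetical names for the
 * reconstructed and corrupt buffers supplied by the caller.
 */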
void
zfs_ereport_finish_checksum(zio_cksum_report_t *report, const abd_t *good_data,
    const abd_t *bad_data, boolean_t drop_if_identical)
{
	zfs_ecksum_info_t *info;

	info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
	    good_data, bad_data, report->zcr_length, drop_if_identical);
	if (info != NULL)
		zfs_zevent_post(report->zcr_ereport,
		    report->zcr_detector, zfs_zevent_post_cb);
	else
		zfs_zevent_post_cb(report->zcr_ereport, report->zcr_detector);

	report->zcr_ereport = report->zcr_detector = NULL;

	if (info != NULL)
		kmem_free(info, sizeof (*info));
}
void
zfs_ereport_free_checksum(zio_cksum_report_t *rpt)
{
	if (rpt->zcr_ereport != NULL) {
		fm_nvlist_destroy(rpt->zcr_ereport,
		    FM_NVA_FREE);
		fm_nvlist_destroy(rpt->zcr_detector,
		    FM_NVA_FREE);
	}

	rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo);

	if (rpt->zcr_ckinfo != NULL)
		kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo));

	kmem_free(rpt, sizeof (*rpt));
}
void
zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
    struct zio *zio, uint64_t offset, uint64_t length,
    const abd_t *good_data, const abd_t *bad_data, zio_bad_cksum_t *zbc)
{
	nvlist_t *ereport = NULL;
	nvlist_t *detector = NULL;
	zfs_ecksum_info_t *info;

	zfs_ereport_start(&ereport, &detector,
	    FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);

	if (ereport == NULL)
		return;

	info = annotate_ecksum(ereport, zbc, good_data, bad_data, length,
	    B_FALSE);

	if (info != NULL) {
		zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
		kmem_free(info, sizeof (*info));
	}
}
/*
 * The 'sysevent.fs.zfs.*' events are signals posted to notify user space of
 * a change in the pool.  All sysevents are listed in sys/sysevent/eventdefs.h
 * and are designed to be consumed by the ZFS Event Daemon (ZED).  For
 * additional details refer to the zed(8) man page.
 */
static nvlist_t *
zfs_event_create(spa_t *spa, vdev_t *vd, const char *type, const char *name,
    nvlist_t *aux)
{
	nvlist_t *resource = NULL;
	char class[64];

	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
		return (NULL);

	if ((resource = fm_nvlist_create(NULL)) == NULL)
		return (NULL);

	(void) snprintf(class, sizeof (class), "%s.%s.%s", type,
	    ZFS_ERROR_CLASS, name);
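	/*
	 * For example (a sketch): zfs_post_remove() below passes
	 * FM_RSRC_CLASS ("resource") and FM_RESOURCE_REMOVED ("removed"),
	 * which combine with ZFS_ERROR_CLASS ("fs.zfs") into the class
	 * string "resource.fs.zfs.removed".
	 */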
	VERIFY0(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION));
	VERIFY0(nvlist_add_string(resource, FM_CLASS, class));
	VERIFY0(nvlist_add_string(resource,
	    FM_EREPORT_PAYLOAD_ZFS_POOL, spa_name(spa)));
	VERIFY0(nvlist_add_uint64(resource,
	    FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)));
	VERIFY0(nvlist_add_uint64(resource,
	    FM_EREPORT_PAYLOAD_ZFS_POOL_STATE, spa_state(spa)));
	VERIFY0(nvlist_add_int32(resource,
	    FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, spa_load_state(spa)));

	if (vd != NULL) {
		VERIFY0(nvlist_add_uint64(resource,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid));
		VERIFY0(nvlist_add_uint64(resource,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, vd->vdev_state));
		if (vd->vdev_path != NULL)
			VERIFY0(nvlist_add_string(resource,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH, vd->vdev_path));
		if (vd->vdev_devid != NULL)
			VERIFY0(nvlist_add_string(resource,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID, vd->vdev_devid));
		if (vd->vdev_fru != NULL)
			VERIFY0(nvlist_add_string(resource,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU, vd->vdev_fru));
		if (vd->vdev_enc_sysfs_path != NULL)
			VERIFY0(nvlist_add_string(resource,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
			    vd->vdev_enc_sysfs_path));
	}

	/* also copy any optional payload data */
	if (aux) {
		nvpair_t *elem = NULL;

		while ((elem = nvlist_next_nvpair(aux, elem)) != NULL)
			(void) nvlist_add_nvpair(resource, elem);
	}

	return (resource);
}
static void
zfs_post_common(spa_t *spa, vdev_t *vd, const char *type, const char *name,
    nvlist_t *aux)
{
	nvlist_t *resource;

	resource = zfs_event_create(spa, vd, type, name, aux);
	if (resource)
		zfs_zevent_post(resource, NULL, zfs_zevent_post_cb);
}
/*
 * The 'resource.fs.zfs.removed' event is an internal signal that the given
 * vdev has been removed from the system.  This will cause the DE to ignore
 * any recent I/O errors, inferring that they are due to the asynchronous
 * device removal.
 */
void
zfs_post_remove(spa_t *spa, vdev_t *vd)
{
	zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_REMOVED, NULL);
}
/*
 * The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool
 * has the 'autoreplace' property set, and therefore any broken vdevs will be
 * handled by higher level logic, and no vdev fault should be generated.
 */
void
zfs_post_autoreplace(spa_t *spa, vdev_t *vd)
{
	zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_AUTOREPLACE, NULL);
}
/*
 * The 'resource.fs.zfs.statechange' event is an internal signal that the
 * given vdev has transitioned its state to DEGRADED or HEALTHY.  This will
 * cause the retire agent to repair any outstanding fault management cases
 * open because the device was not found (fault.fs.zfs.device).
 */
void
zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate)
{
	nvlist_t *aux;

	/*
	 * Add optional supplemental keys to payload
	 */
	aux = fm_nvlist_create(NULL);
	if (vd && aux) {
		if (vd->vdev_physpath) {
			(void) nvlist_add_string(aux,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_PHYSPATH,
			    vd->vdev_physpath);
		}
		if (vd->vdev_enc_sysfs_path) {
			(void) nvlist_add_string(aux,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
			    vd->vdev_enc_sysfs_path);
		}

		(void) nvlist_add_uint64(aux,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE, laststate);
	}

	zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_STATECHANGE,
	    aux);

	if (aux)
		fm_nvlist_destroy(aux, FM_NVA_FREE);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_ereport_post);
EXPORT_SYMBOL(zfs_ereport_post_checksum);
EXPORT_SYMBOL(zfs_post_remove);
EXPORT_SYMBOL(zfs_post_autoreplace);
EXPORT_SYMBOL(zfs_post_state_change);
#endif /* _KERNEL */