/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>

#include <sys/fm/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/sysevent.h>

/*
 * This general routine is responsible for generating all the different ZFS
 * ereports.  The payload is dependent on the class, and which arguments are
 * supplied to the function:
 *
 *      EREPORT                 POOL    VDEV    IO
 *      block                   X       X       X
 *      data                    X               X
 *      device                  X       X
 *      pool                    X
 *
 * If we are in a loading state, all errors are chained together by the same
 * SPA-wide ENA (Error Numeric Association).
 *
 * For isolated I/O requests, we get the ENA from the zio_t.  The propagation
 * gets very complicated due to RAID-Z, gang blocks, and vdev caching.  We want
 * to chain together all ereports associated with a logical piece of data.  For
 * read I/Os, there are basically three 'types' of I/O, which form a roughly
 * layered diagram:
 *
 *      +---------------+
 *      | Aggregate I/O |       No associated logical data or device
 *      +---------------+
 *              |
 *              V
 *      +---------------+       Reads associated with a piece of logical data.
 *      |   Read I/O    |       This includes reads on behalf of RAID-Z,
 *      +---------------+       mirrors, gang blocks, retries, etc.
 *              |
 *              V
 *      +---------------+       Reads associated with a particular device, but
 *      | Physical I/O  |       no logical data.  Issued as part of vdev caching
 *      +---------------+       and I/O aggregation.
 *
 * Note that 'physical I/O' here is not the same terminology as used elsewhere
 * in ZIO.  Typically, 'physical I/O' simply means that there is no attached
 * block pointer.  But I/O with no associated block pointer can still be
 * related to a logical piece of data (e.g. RAID-Z requests).
 *
 * Purely physical I/Os always have unique ENAs.  They are not related to a
 * particular piece of logical data, and therefore cannot be chained together.
 * We still generate an ereport, but the DE doesn't correlate it with any
 * logical piece of data.  When such an I/O fails, the delegated I/O requests
 * will issue a retry, which will trigger the 'real' ereport with the correct
 * ENA.
 *
 * We keep track of the ENA for a ZIO chain through the 'io_logical' member.
 * When a new logical I/O is issued, we set this to point to itself.  Child
 * I/Os then inherit this pointer, so that once it is first set, all
 * subsequent failures will use the same ENA.  For vdev cache fill and queue
 * aggregation I/O, this pointer is set to NULL, and no ereport will be
 * generated (since it doesn't actually correspond to any particular device
 * or piece of data, and the caller will always retry without caching or
 * queueing anyway).
 *
 * For checksum errors, we want to include more information about the actual
 * error which occurs.  Accordingly, we build an ereport when the error is
 * noticed, but instead of sending it in immediately, we hang it off of the
 * io_cksum_report field of the logical IO.  When the logical IO completes
 * (successfully or not), zfs_ereport_finish_checksum() is called with the
 * good and bad versions of the buffer (if available), and we annotate the
 * ereport with information about the differences.
 */
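
/*
 * A rough sketch of that checksum-report lifecycle (illustrative only; the
 * real call sites live in the ZIO pipeline, and 'good_buf'/'bad_buf' are
 * hypothetical names):
 *
 *      zio_bad_cksum_t zbc;
 *
 *      1. On checksum failure, build the report and hang it off the
 *         logical zio:
 *              zfs_ereport_start_checksum(spa, vd, zio, zio->io_offset,
 *                  zio->io_size, NULL, &zbc);
 *
 *      2. When the logical I/O completes, each report hanging off
 *         io_cksum_report is annotated with the good/bad buffers and posted:
 *              zfs_ereport_finish_checksum(report, good_buf, bad_buf,
 *                  B_TRUE);
 *
 *      3. Or, if a report is abandoned, release it without posting:
 *              zfs_ereport_free_checksum(report);
 */
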
#ifdef _KERNEL
static void
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
    const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
        nvlist_t *ereport, *detector;

        uint64_t ena;
        char class[64];

        /*
         * If we are doing a spa_tryimport() or in recovery mode,
         * ignore errors.
         */
        if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT ||
            spa_load_state(spa) == SPA_LOAD_RECOVER)
                return;

        /*
         * If we are in the middle of opening a pool, and the previous attempt
         * failed, don't bother logging any new ereports - we're just going to
         * get the same diagnosis anyway.
         */
        if (spa_load_state(spa) != SPA_LOAD_NONE &&
            spa->spa_last_open_failed)
                return;

        if (zio != NULL) {
                /*
                 * If this is not a read or write zio, ignore the error.  This
                 * can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
                 */
                if (zio->io_type != ZIO_TYPE_READ &&
                    zio->io_type != ZIO_TYPE_WRITE)
                        return;

                /*
                 * Ignore any errors from speculative I/Os, as failure is an
                 * expected result.
                 */
                if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
                        return;

                /*
                 * If this is an EIO that has not yet been retried, don't
                 * post an ereport.  Otherwise, we risk making bad diagnoses
                 * based on B_FAILFAST I/Os.
                 */
                if (zio->io_error == EIO &&
                    !(zio->io_flags & ZIO_FLAG_IO_RETRY))
                        return;

                if (vd != NULL) {
                        /*
                         * If the vdev has already been marked as failing due
                         * to a failed probe, then ignore any subsequent I/O
                         * errors, as the DE will automatically fault the vdev
                         * on the first such failure.  This also catches cases
                         * where vdev_remove_wanted is set and the device has
                         * not yet been asynchronously placed into the REMOVED
                         * state.
                         */
                        if (zio->io_vd == vd && !vdev_accessible(vd, zio))
                                return;

                        /*
                         * Ignore checksum errors for reads from DTL regions of
                         * leaf vdevs.
                         */
                        if (zio->io_type == ZIO_TYPE_READ &&
                            zio->io_error == ECKSUM &&
                            vd->vdev_ops->vdev_op_leaf &&
                            vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
                                return;
                }
        }

        /*
         * For probe failure, we want to avoid posting ereports if we've
         * already removed the device in the meantime.
         */
        if (vd != NULL &&
            strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) == 0 &&
            (vd->vdev_remove_wanted || vd->vdev_state == VDEV_STATE_REMOVED))
                return;

        if ((ereport = fm_nvlist_create(NULL)) == NULL)
                return;

        if ((detector = fm_nvlist_create(NULL)) == NULL) {
                fm_nvlist_destroy(ereport, FM_NVA_FREE);
                return;
        }

        /*
         * Serialize ereport generation
         */
        mutex_enter(&spa->spa_errlist_lock);

        /*
         * Determine the ENA to use for this event.  If we are in a loading
         * state, use a SPA-wide ENA.  Otherwise, if we are in an I/O state,
         * use a root zio-wide ENA.  Otherwise, simply use a unique ENA.
         */
        if (spa_load_state(spa) != SPA_LOAD_NONE) {
                if (spa->spa_ena == 0)
                        spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
                ena = spa->spa_ena;
        } else if (zio != NULL && zio->io_logical != NULL) {
                if (zio->io_logical->io_ena == 0)
                        zio->io_logical->io_ena =
                            fm_ena_generate(0, FM_ENA_FMT1);
                ena = zio->io_logical->io_ena;
        } else {
                ena = fm_ena_generate(0, FM_ENA_FMT1);
        }

        /*
         * Construct the full class, detector, and other standard FMA fields.
         */
        (void) snprintf(class, sizeof (class), "%s.%s",
            ZFS_ERROR_CLASS, subclass);

        fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa),
            vd != NULL ? vd->vdev_guid : 0);

        fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL);

        /*
         * Construct the per-ereport payload, depending on which parameters are
         * passed in.
         */

        /*
         * Generic payload members common to all ereports.
         */
        fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL,
            DATA_TYPE_STRING, spa_name(spa), FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
            DATA_TYPE_UINT64, spa_guid(spa),
            FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32,
            spa_load_state(spa), NULL);

        if (spa != NULL) {
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
                    DATA_TYPE_STRING,
                    spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
                    FM_EREPORT_FAILMODE_WAIT :
                    spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
                    FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC,
                    NULL);
        }

        if (vd != NULL) {
                vdev_t *pvd = vd->vdev_parent;

                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
                    DATA_TYPE_UINT64, vd->vdev_guid,
                    FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
                    DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL);
                if (vd->vdev_path != NULL)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
                            DATA_TYPE_STRING, vd->vdev_path, NULL);
                if (vd->vdev_devid != NULL)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID,
                            DATA_TYPE_STRING, vd->vdev_devid, NULL);
                if (vd->vdev_fru != NULL)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU,
                            DATA_TYPE_STRING, vd->vdev_fru, NULL);

                if (pvd != NULL) {
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID,
                            DATA_TYPE_UINT64, pvd->vdev_guid,
                            FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
                            DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type,
                            NULL);
                        if (pvd->vdev_path)
                                fm_payload_set(ereport,
                                    FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
                                    DATA_TYPE_STRING, pvd->vdev_path, NULL);
                        if (pvd->vdev_devid)
                                fm_payload_set(ereport,
                                    FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
                                    DATA_TYPE_STRING, pvd->vdev_devid, NULL);
                }
        }

        if (zio != NULL) {
                /*
                 * Payload common to all I/Os.
                 */
                fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
                    DATA_TYPE_INT32, zio->io_error, NULL);

                /*
                 * If the 'size' parameter is non-zero, it indicates this is a
                 * RAID-Z or other I/O where the physical offset and length are
                 * provided for us, instead of within the zio_t.
                 */
                if (vd != NULL) {
                        if (size)
                                fm_payload_set(ereport,
                                    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
                                    DATA_TYPE_UINT64, stateoroffset,
                                    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
                                    DATA_TYPE_UINT64, size, NULL);
                        else
                                fm_payload_set(ereport,
                                    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
                                    DATA_TYPE_UINT64, zio->io_offset,
                                    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
                                    DATA_TYPE_UINT64, zio->io_size, NULL);
                }

                /*
                 * Payload for I/Os with corresponding logical information.
                 */
                if (zio->io_logical != NULL)
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET,
                            DATA_TYPE_UINT64,
                            zio->io_logical->io_bookmark.zb_objset,
                            FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
                            DATA_TYPE_UINT64,
                            zio->io_logical->io_bookmark.zb_object,
                            FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
                            DATA_TYPE_INT64,
                            zio->io_logical->io_bookmark.zb_level,
                            FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
                            DATA_TYPE_UINT64,
                            zio->io_logical->io_bookmark.zb_blkid, NULL);
        } else if (vd != NULL) {
                /*
                 * If we have a vdev but no zio, this is a device fault, and the
                 * 'stateoroffset' parameter indicates the previous state of the
                 * vdev.
                 */
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
                    DATA_TYPE_UINT64, stateoroffset, NULL);
        }

        mutex_exit(&spa->spa_errlist_lock);

        *ereport_out = ereport;
        *detector_out = detector;
}

/* if it's <= 128 bytes, save the corruption directly */
#define ZFM_MAX_INLINE          (128 / sizeof (uint64_t))

#define MAX_RANGES              16

typedef struct zfs_ecksum_info {
        /* histograms of set and cleared bits by bit number in a 64-bit word */
        uint16_t zei_histogram_set[sizeof (uint64_t) * NBBY];
        uint16_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];

        /* inline arrays of bits set and cleared. */
        uint64_t zei_bits_set[ZFM_MAX_INLINE];
        uint64_t zei_bits_cleared[ZFM_MAX_INLINE];

        /*
         * for each range, the number of bits set and cleared.  The Hamming
         * distance between the good and bad buffers is the sum of them all.
         */
        uint32_t zei_range_sets[MAX_RANGES];
        uint32_t zei_range_clears[MAX_RANGES];

        struct zei_ranges {
                uint32_t zr_start;
                uint32_t zr_end;
        } zei_ranges[MAX_RANGES];

        size_t zei_range_count;
        uint32_t zei_mingap;
        uint32_t zei_allowed_mingap;

} zfs_ecksum_info_t;

static void
update_histogram(uint64_t value_arg, uint16_t *hist, uint32_t *count)
{
        size_t i;
        size_t bits = 0;
        uint64_t value = BE_64(value_arg);

        /* We store the bits in big-endian (largest-first) order */
        for (i = 0; i < 64; i++) {
                if (value & (1ull << i)) {
                        hist[63 - i]++;
                        ++bits;
                }
        }
        /* update the count of bits changed */
        *count += bits;
}
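
/*
 * Worked example (illustrative): after the BE_64() byteswap, a value with
 * only bit k set increments hist[63 - k] and bumps *count by one.  So a
 * flip in the lowest-order bit of the big-endian word lands in hist[63],
 * while a flip in the highest-order bit lands in hist[0].
 */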

/*
 * We've now filled up the range array, and need to increase "mingap" and
 * shrink the range list accordingly.  zei_mingap is always the smallest
 * distance between array entries, so we set the new_allowed_gap to be
 * one greater than that.  We then go through the list, joining together
 * any ranges which are closer than the new_allowed_gap.
 *
 * By construction, there will be at least one pair of ranges to join.
 * We also update zei_mingap to the new smallest gap, to prepare for our
 * next invocation.
 */
static void
shrink_ranges(zfs_ecksum_info_t *eip)
{
        uint32_t mingap = UINT32_MAX;
        uint32_t new_allowed_gap = eip->zei_mingap + 1;

        size_t idx, output;
        size_t max = eip->zei_range_count;

        struct zei_ranges *r = eip->zei_ranges;

        ASSERT3U(eip->zei_range_count, >, 0);
        ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);

        output = idx = 0;
        while (idx < max - 1) {
                uint32_t start = r[idx].zr_start;
                uint32_t end = r[idx].zr_end;

                while (idx < max - 1) {
                        idx++;

                        uint32_t nstart = r[idx].zr_start;
                        uint32_t nend = r[idx].zr_end;

                        uint32_t gap = nstart - end;
                        if (gap < new_allowed_gap) {
                                end = nend;
                                continue;
                        }
                        if (gap < mingap)
                                mingap = gap;
                        break;
                }
                r[output].zr_start = start;
                r[output].zr_end = end;
                output++;
        }
        ASSERT3U(output, <, eip->zei_range_count);
        eip->zei_range_count = output;
        eip->zei_mingap = mingap;
        eip->zei_allowed_mingap = new_allowed_gap;
}

static void
add_range(zfs_ecksum_info_t *eip, int start, int end)
{
        struct zei_ranges *r = eip->zei_ranges;
        size_t count = eip->zei_range_count;

        if (count >= MAX_RANGES) {
                shrink_ranges(eip);
                count = eip->zei_range_count;
        }
        if (count == 0) {
                eip->zei_mingap = UINT32_MAX;
                eip->zei_allowed_mingap = 1;
        } else {
                int gap = start - r[count - 1].zr_end;

                if (gap < eip->zei_allowed_mingap) {
                        r[count - 1].zr_end = end;
                        return;
                }
                if (gap < eip->zei_mingap)
                        eip->zei_mingap = gap;
        }
        r[count].zr_start = start;
        r[count].zr_end = end;
        eip->zei_range_count++;
}
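
/*
 * Worked example (illustrative): with zei_allowed_mingap == 1, the calls
 * add_range(eip, 0, 2), add_range(eip, 2, 3), add_range(eip, 10, 11)
 * produce the two ranges [0, 3) and [10, 11): the first two touch (gap 0,
 * which is < 1) and are merged, while the third is 7 words away and is
 * kept separate, with zei_mingap updated to 7.
 */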

static size_t
range_total_size(zfs_ecksum_info_t *eip)
{
        struct zei_ranges *r = eip->zei_ranges;
        size_t count = eip->zei_range_count;
        size_t result = 0;
        size_t idx;

        for (idx = 0; idx < count; idx++)
                result += (r[idx].zr_end - r[idx].zr_start);

        return (result);
}

static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
    const uint8_t *goodbuf, const uint8_t *badbuf, size_t size,
    boolean_t drop_if_identical)
{
        const uint64_t *good = (const uint64_t *)goodbuf;
        const uint64_t *bad = (const uint64_t *)badbuf;

        uint64_t allset = 0;
        uint64_t allcleared = 0;

        size_t nui64s = size / sizeof (uint64_t);

        size_t inline_size;
        int no_inline = 0;
        size_t idx;
        size_t range;

        size_t offset = 0;
        ssize_t start = -1;

        zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_SLEEP);

        /* don't do any annotation for injected checksum errors */
        if (info != NULL && info->zbc_injected)
                return (eip);

        if (info != NULL && info->zbc_has_cksum) {
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
                    DATA_TYPE_UINT64_ARRAY,
                    sizeof (info->zbc_expected) / sizeof (uint64_t),
                    (uint64_t *)&info->zbc_expected,
                    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
                    DATA_TYPE_UINT64_ARRAY,
                    sizeof (info->zbc_actual) / sizeof (uint64_t),
                    (uint64_t *)&info->zbc_actual,
                    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
                    DATA_TYPE_STRING,
                    info->zbc_checksum_name,
                    NULL);

                if (info->zbc_byteswapped) {
                        fm_payload_set(ereport,
                            FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
                            DATA_TYPE_BOOLEAN, 1,
                            NULL);
                }
        }

        if (badbuf == NULL || goodbuf == NULL)
                return (eip);

        ASSERT3U(nui64s, <=, UINT16_MAX);
        ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
        ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
        ASSERT3U(size, <=, UINT32_MAX);

        /* build up the range list by comparing the two buffers. */
        for (idx = 0; idx < nui64s; idx++) {
                if (good[idx] == bad[idx]) {
                        if (start == -1)
                                continue;

                        add_range(eip, start, idx);
                        start = -1;
                } else {
                        if (start != -1)
                                continue;

                        start = idx;
                }
        }
        if (start != -1)
                add_range(eip, start, idx);

        /* See if it will fit in our inline buffers */
        inline_size = range_total_size(eip);
        if (inline_size > ZFM_MAX_INLINE)
                no_inline = 1;

        /*
         * If there is no change and we want to drop if the buffers are
         * identical, do so.
         */
        if (inline_size == 0 && drop_if_identical) {
                kmem_free(eip, sizeof (*eip));
                return (NULL);
        }

        /*
         * Now walk through the ranges, filling in the details of the
         * differences.  Also convert our uint64_t-array offsets to byte
         * offsets.
         */
        for (range = 0; range < eip->zei_range_count; range++) {
                size_t start = eip->zei_ranges[range].zr_start;
                size_t end = eip->zei_ranges[range].zr_end;

                for (idx = start; idx < end; idx++) {
                        uint64_t set, cleared;

                        /* bits set in bad, but not in good */
                        set = ((~good[idx]) & bad[idx]);
                        /* bits set in good, but not in bad */
                        cleared = (good[idx] & (~bad[idx]));

                        allset |= set;
                        allcleared |= cleared;

                        if (!no_inline) {
                                ASSERT3U(offset, <, inline_size);
                                eip->zei_bits_set[offset] = set;
                                eip->zei_bits_cleared[offset] = cleared;
                                offset++;
                        }

                        update_histogram(set, eip->zei_histogram_set,
                            &eip->zei_range_sets[range]);
                        update_histogram(cleared, eip->zei_histogram_cleared,
                            &eip->zei_range_clears[range]);
                }

                /* convert to byte offsets */
                eip->zei_ranges[range].zr_start *= sizeof (uint64_t);
                eip->zei_ranges[range].zr_end *= sizeof (uint64_t);
        }
        eip->zei_allowed_mingap *= sizeof (uint64_t);
        inline_size *= sizeof (uint64_t);

        /* fill in ereport */
        fm_payload_set(ereport,
            FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
            DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
            (uint32_t *)eip->zei_ranges,
            FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
            DATA_TYPE_UINT32, eip->zei_allowed_mingap,
            FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
            DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
            FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
            DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
            NULL);

        if (!no_inline) {
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
                    DATA_TYPE_UINT8_ARRAY,
                    inline_size, (uint8_t *)eip->zei_bits_set,
                    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
                    DATA_TYPE_UINT8_ARRAY,
                    inline_size, (uint8_t *)eip->zei_bits_cleared,
                    NULL);
        } else {
                fm_payload_set(ereport,
                    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
                    DATA_TYPE_UINT16_ARRAY,
                    NBBY * sizeof (uint64_t), eip->zei_histogram_set,
                    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
                    DATA_TYPE_UINT16_ARRAY,
                    NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
                    NULL);
        }
        return (eip);
}
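
/*
 * Worked example (illustrative): for a single flipped bit in one 64-bit
 * word, the ereport would carry one byte-offset range covering that word,
 * a one-word zei_bits_set[]/zei_bits_cleared[] inline payload (well under
 * ZFM_MAX_INLINE), and a Hamming distance of 1 recorded in that range's
 * set or clear count.
 */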
#endif

void
zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
#ifdef _KERNEL
        nvlist_t *ereport = NULL;
        nvlist_t *detector = NULL;

        zfs_ereport_start(&ereport, &detector,
            subclass, spa, vd, zio, stateoroffset, size);

        if (ereport == NULL)
                return;

        fm_ereport_post(ereport, EVCH_SLEEP);

        fm_nvlist_destroy(ereport, FM_NVA_FREE);
        fm_nvlist_destroy(detector, FM_NVA_FREE);
#endif
}
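
/*
 * Usage sketch (illustrative; the actual call sites are in the ZIO
 * pipeline and vdev code): a failed I/O on a leaf vdev might be reported
 * as
 *
 *      zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);
 *
 * while a device fault with no associated zio passes the vdev's previous
 * state in 'stateoroffset' instead.
 */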

void
zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
    struct zio *zio, uint64_t offset, uint64_t length, void *arg,
    zio_bad_cksum_t *info)
{
        zio_cksum_report_t *report = kmem_zalloc(sizeof (*report), KM_SLEEP);

        if (zio->io_vsd != NULL)
                zio->io_vsd_ops->vsd_cksum_report(zio, report, arg);
        else
                zio_vsd_default_cksum_report(zio, report, arg);

        /* copy the checksum failure information if it was provided */
        if (info != NULL) {
                report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP);
                bcopy(info, report->zcr_ckinfo, sizeof (*info));
        }

        report->zcr_align = 1ULL << vd->vdev_top->vdev_ashift;
        report->zcr_length = length;

#ifdef _KERNEL
        zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector,
            FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);

        if (report->zcr_ereport == NULL) {
                report->zcr_free(report->zcr_cbdata, report->zcr_cbinfo);
                kmem_free(report, sizeof (*report));
                return;
        }
#endif

        mutex_enter(&spa->spa_errlist_lock);
        report->zcr_next = zio->io_logical->io_cksum_report;
        zio->io_logical->io_cksum_report = report;
        mutex_exit(&spa->spa_errlist_lock);
}
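
/*
 * Illustrative consumer pattern (hypothetical names 'lio', 'good', 'bad'):
 * when the logical I/O completes, the reports accumulated above are walked
 * via their zcr_next links, e.g.
 *
 *      zio_cksum_report_t *zcr;
 *      for (zcr = lio->io_cksum_report; zcr != NULL; zcr = zcr->zcr_next)
 *              zfs_ereport_finish_checksum(zcr, good, bad, B_TRUE);
 */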

void
zfs_ereport_finish_checksum(zio_cksum_report_t *report,
    const void *good_data, const void *bad_data, boolean_t drop_if_identical)
{
#ifdef _KERNEL
        zfs_ecksum_info_t *info = NULL;
        info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
            good_data, bad_data, report->zcr_length, drop_if_identical);

        if (info != NULL)
                fm_ereport_post(report->zcr_ereport, EVCH_SLEEP);

        fm_nvlist_destroy(report->zcr_ereport, FM_NVA_FREE);
        fm_nvlist_destroy(report->zcr_detector, FM_NVA_FREE);
        report->zcr_ereport = report->zcr_detector = NULL;

        if (info != NULL)
                kmem_free(info, sizeof (*info));
#endif
}

void
zfs_ereport_free_checksum(zio_cksum_report_t *rpt)
{
#ifdef _KERNEL
        if (rpt->zcr_ereport != NULL) {
                fm_nvlist_destroy(rpt->zcr_ereport, FM_NVA_FREE);
                fm_nvlist_destroy(rpt->zcr_detector, FM_NVA_FREE);
        }
#endif
        rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo);

        if (rpt->zcr_ckinfo != NULL)
                kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo));

        kmem_free(rpt, sizeof (*rpt));
}

void
zfs_ereport_send_interim_checksum(zio_cksum_report_t *report)
{
#ifdef _KERNEL
        fm_ereport_post(report->zcr_ereport, EVCH_SLEEP);
#endif
}

void
zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
    struct zio *zio, uint64_t offset, uint64_t length,
    const void *good_data, const void *bad_data, zio_bad_cksum_t *zbc)
{
#ifdef _KERNEL
        nvlist_t *ereport = NULL;
        nvlist_t *detector = NULL;
        zfs_ecksum_info_t *info;

        zfs_ereport_start(&ereport, &detector,
            FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);

        if (ereport == NULL)
                return;

        info = annotate_ecksum(ereport, zbc, good_data, bad_data, length,
            B_FALSE);

        if (info != NULL)
                fm_ereport_post(ereport, EVCH_SLEEP);

        fm_nvlist_destroy(ereport, FM_NVA_FREE);
        fm_nvlist_destroy(detector, FM_NVA_FREE);

        if (info != NULL)
                kmem_free(info, sizeof (*info));
#endif
}

static void
zfs_post_common(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
        nvlist_t *resource;
        char class[64];

        if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
                return;

        if ((resource = fm_nvlist_create(NULL)) == NULL)
                return;

        (void) snprintf(class, sizeof (class), "%s.%s.%s", FM_RSRC_RESOURCE,
            ZFS_ERROR_CLASS, name);
        VERIFY(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION) == 0);
        VERIFY(nvlist_add_string(resource, FM_CLASS, class) == 0);
        VERIFY(nvlist_add_uint64(resource,
            FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)) == 0);
        if (vd)
                VERIFY(nvlist_add_uint64(resource,
                    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid) == 0);

        fm_ereport_post(resource, EVCH_SLEEP);

        fm_nvlist_destroy(resource, FM_NVA_FREE);
#endif
}
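
/*
 * For example (assuming the usual values of FM_RSRC_RESOURCE and
 * ZFS_ERROR_CLASS), posting with name == FM_RESOURCE_REMOVED yields the
 * class string "resource.fs.zfs.removed".
 */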

/*
 * The 'resource.fs.zfs.removed' event is an internal signal that the given
 * vdev has been removed from the system.  This will cause the DE to ignore
 * any recent I/O errors, inferring that they are due to the asynchronous
 * device removal.
 */
void
zfs_post_remove(spa_t *spa, vdev_t *vd)
{
        zfs_post_common(spa, vd, FM_RESOURCE_REMOVED);
}

/*
 * The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool
 * has the 'autoreplace' property set, and therefore any broken vdevs will be
 * handled by higher level logic, and no vdev fault should be generated.
 */
void
zfs_post_autoreplace(spa_t *spa, vdev_t *vd)
{
        zfs_post_common(spa, vd, FM_RESOURCE_AUTOREPLACE);
}

/*
 * The 'resource.fs.zfs.statechange' event is an internal signal that the
 * given vdev has transitioned its state to DEGRADED or HEALTHY.  This will
 * cause the retire agent to repair any outstanding fault management cases
 * open because the device was not found (fault.fs.zfs.device).
 */
void
zfs_post_state_change(spa_t *spa, vdev_t *vd)
{
        zfs_post_common(spa, vd, FM_RESOURCE_STATECHANGE);
}