/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Fault Management Architecture (FMA) Resource and Protocol Support
 *
 * The routines contained herein provide services to support kernel subsystems
 * in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
 *
 * Name-Value Pair Lists
 *
 * The embodiment of an FMA protocol element (event, fmri or authority) is a
 * name-value pair list (nvlist_t). FMA-specific nvlist constructor and
 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
 * to create an nvpair list using custom allocators. Callers may choose to
 * allocate either from the kernel memory allocator, or from a preallocated
 * buffer, useful in constrained contexts like high-level interrupt routines.
 *
 * Protocol Event and FMRI Construction
 *
 * Convenience routines are provided to construct nvlist events according to
 * the FMA Event Protocol and Naming Schema specification for ereports and
 * FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
 *
 * ENA Manipulation
 *
 * Routines to generate ENA formats 0, 1 and 2 are available as well as
 * routines to increment formats 1 and 2. Individual fields within the
 * ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
 * fm_ena_format_get() and fm_ena_generation_get().
 */

#include <sys/types.h>
#include <sys/time.h>
#include <sys/list.h>
#include <sys/nvpair.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <sys/compress.h>
#include <sys/sunddi.h>
#include <sys/systeminfo.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/kstat.h>
#include <sys/zfs_context.h>
#ifdef _KERNEL
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/cpuvar.h>
#include <sys/systm.h>
#include <sys/dumphdr.h>
#include <sys/cpuvar.h>
#include <sys/console.h>
#include <sys/kobj.h>
#include <sys/time.h>
#include <sys/zfs_ioctl.h>

int zfs_zevent_len_max = 0;
int zfs_zevent_cols = 80;
int zfs_zevent_console = 0;

static int zevent_len_cur = 0;
static int zevent_waiters = 0;
static int zevent_flags = 0;

static kmutex_t zevent_lock;
static list_t zevent_list;
static kcondvar_t zevent_cv;
#endif /* _KERNEL */

extern void fastreboot_disable_highpil(void);

/*
 * Common fault management kstats to record event generation failures
 */

struct erpt_kstat {
	kstat_named_t	erpt_dropped;		/* num erpts dropped on post */
	kstat_named_t	erpt_set_failed;	/* num erpt set failures */
	kstat_named_t	fmri_set_failed;	/* num fmri set failures */
	kstat_named_t	payload_set_failed;	/* num payload set failures */
};

static struct erpt_kstat erpt_kstat_data = {
	{ "erpt-dropped", KSTAT_DATA_UINT64 },
	{ "erpt-set-failed", KSTAT_DATA_UINT64 },
	{ "fmri-set-failed", KSTAT_DATA_UINT64 },
	{ "payload-set-failed", KSTAT_DATA_UINT64 }
};

kstat_t *fm_ksp;

#ifdef _KERNEL

/*
 * Formatting utility function for fm_nvprintr. We attempt to wrap chunks of
 * output so they aren't split across console lines, and return the end column.
 */
/*PRINTFLIKE4*/
static int
fm_printf(int depth, int c, int cols, const char *format, ...)
{
	va_list ap;
	int width;
	char c1;

	va_start(ap, format);
	width = vsnprintf(&c1, sizeof (c1), format, ap);
	va_end(ap);

	if (c + width >= cols) {
		console_printf("\n");
		c = 0;
		if (format[0] != ' ' && depth > 0) {
			console_printf(" ");
			c++;
		}
	}

	va_start(ap, format);
	console_vprintf(format, ap);
	va_end(ap);

	return ((c + width) % cols);
}

/*
 * Recursively print a nvlist in the specified column width and return the
 * column we end up in. This function is called recursively by fm_nvprint(),
 * below. We generically format the entire nvpair using hexadecimal
 * integers and strings, and elide any integer arrays. Arrays are basically
 * used for cache dumps right now, so we suppress them so as not to overwhelm
 * the amount of console output we produce at panic time. This can be further
 * enhanced as FMA technology grows based upon the needs of consumers. All
 * FMA telemetry is logged using the dump device transport, so the console
 * output serves only as a fallback in case this procedure is unsuccessful.
 */
static int
fm_nvprintr(nvlist_t *nvl, int d, int c, int cols)
{
	nvpair_t *nvp;

	for (nvp = nvlist_next_nvpair(nvl, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {

		data_type_t type = nvpair_type(nvp);
		const char *name = nvpair_name(nvp);

		boolean_t b;
		uint8_t i8;
		uint16_t i16;
		uint32_t i32;
		uint64_t i64;
		char *str;
		nvlist_t *cnv;

		if (strcmp(name, FM_CLASS) == 0)
			continue;	/* already printed by caller */

		c = fm_printf(d, c, cols, " %s=", name);

		switch (type) {
		case DATA_TYPE_BOOLEAN:
			c = fm_printf(d + 1, c, cols, " 1");
			break;

		case DATA_TYPE_BOOLEAN_VALUE:
			(void) nvpair_value_boolean_value(nvp, &b);
			c = fm_printf(d + 1, c, cols, b ? "1" : "0");
			break;

		case DATA_TYPE_BYTE:
			(void) nvpair_value_byte(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "0x%x", i8);
			break;

		case DATA_TYPE_INT8:
			(void) nvpair_value_int8(nvp, (void *)&i8);
			c = fm_printf(d + 1, c, cols, "0x%x", i8);
			break;

		case DATA_TYPE_UINT8:
			(void) nvpair_value_uint8(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "0x%x", i8);
			break;

		case DATA_TYPE_INT16:
			(void) nvpair_value_int16(nvp, (void *)&i16);
			c = fm_printf(d + 1, c, cols, "0x%x", i16);
			break;

		case DATA_TYPE_UINT16:
			(void) nvpair_value_uint16(nvp, &i16);
			c = fm_printf(d + 1, c, cols, "0x%x", i16);
			break;

		case DATA_TYPE_INT32:
			(void) nvpair_value_int32(nvp, (void *)&i32);
			c = fm_printf(d + 1, c, cols, "0x%x", i32);
			break;

		case DATA_TYPE_UINT32:
			(void) nvpair_value_uint32(nvp, &i32);
			c = fm_printf(d + 1, c, cols, "0x%x", i32);
			break;

		case DATA_TYPE_INT64:
			(void) nvpair_value_int64(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "0x%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_UINT64:
			(void) nvpair_value_uint64(nvp, &i64);
			c = fm_printf(d + 1, c, cols, "0x%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_HRTIME:
			(void) nvpair_value_hrtime(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "0x%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_STRING:
			(void) nvpair_value_string(nvp, &str);
			c = fm_printf(d + 1, c, cols, "\"%s\"",
			    str ? str : "<NULL>");
			break;

		case DATA_TYPE_NVLIST:
			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist(nvp, &cnv);
			c = fm_nvprintr(cnv, d + 1, c, cols);
			c = fm_printf(d + 1, c, cols, " ]");
			break;

		case DATA_TYPE_NVLIST_ARRAY: {
			nvlist_t **val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++) {
				c = fm_nvprintr(val[i], d + 1, c, cols);
			}
			c = fm_printf(d + 1, c, cols, " ]");
		}
		break;

		case DATA_TYPE_INT8_ARRAY: {
			int8_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_int8_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_UINT8_ARRAY: {
			uint8_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_uint8_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_INT16_ARRAY: {
			int16_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_int16_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_UINT16_ARRAY: {
			uint16_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_uint16_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_INT32_ARRAY: {
			int32_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_int32_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_UINT32_ARRAY: {
			uint32_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_uint32_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_INT64_ARRAY: {
			int64_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_int64_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_UINT64_ARRAY: {
			uint64_t *val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[ ");
			(void) nvpair_value_uint64_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				c = fm_printf(d + 1, c, cols, "0x%llx ",
				    (u_longlong_t)val[i]);

			c = fm_printf(d + 1, c, cols, "]");
			break;
		}

		case DATA_TYPE_STRING_ARRAY:
		case DATA_TYPE_BOOLEAN_ARRAY:
		case DATA_TYPE_BYTE_ARRAY:
			c = fm_printf(d + 1, c, cols, "[...]");
			break;

		case DATA_TYPE_UNKNOWN:
			c = fm_printf(d + 1, c, cols, "<unknown>");
			break;
		}
	}

	return (c);
}

void
fm_nvprint(nvlist_t *nvl)
{
	char *class;
	int c = 0;

	console_printf("\n");

	if (nvlist_lookup_string(nvl, FM_CLASS, &class) == 0)
		c = fm_printf(0, c, zfs_zevent_cols, "%s", class);

	if (fm_nvprintr(nvl, 0, c, zfs_zevent_cols) != 0)
		console_printf("\n");

	console_printf("\n");
}

static zevent_t *
zfs_zevent_alloc(void)
{
	zevent_t *ev;

	ev = kmem_zalloc(sizeof (zevent_t), KM_PUSHPAGE);
	if (ev == NULL)
		return (NULL);

	list_create(&ev->ev_ze_list, sizeof (zfs_zevent_t),
	    offsetof(zfs_zevent_t, ze_node));
	list_link_init(&ev->ev_node);

	return (ev);
}

static void
zfs_zevent_free(zevent_t *ev)
{
	/* Run provided cleanup callback */
	ev->ev_cb(ev->ev_nvl, ev->ev_detector);

	list_destroy(&ev->ev_ze_list);
	kmem_free(ev, sizeof (zevent_t));
}

static void
zfs_zevent_drain(zevent_t *ev)
{
	zfs_zevent_t *ze;

	ASSERT(MUTEX_HELD(&zevent_lock));
	list_remove(&zevent_list, ev);

	/* Remove references to this event in all private file data */
	while ((ze = list_head(&ev->ev_ze_list)) != NULL) {
		list_remove(&ev->ev_ze_list, ze);
		ze->ze_zevent = NULL;
		ze->ze_dropped++;
	}

	zfs_zevent_free(ev);
}

void
zfs_zevent_drain_all(int *count)
{
	zevent_t *ev;

	mutex_enter(&zevent_lock);
	while ((ev = list_head(&zevent_list)) != NULL)
		zfs_zevent_drain(ev);

	*count = zevent_len_cur;
	zevent_len_cur = 0;
	mutex_exit(&zevent_lock);
}

/*
 * New zevents are inserted at the head. If the maximum queue
 * length is exceeded a zevent will be drained from the tail.
 * As part of this any user space processes which currently have
 * a reference to this zevent_t in their private data will have
 * this reference set to NULL.
 */
static void
zfs_zevent_insert(zevent_t *ev)
{
	ASSERT(MUTEX_HELD(&zevent_lock));
	list_insert_head(&zevent_list, ev);

	if (zevent_len_cur >= zfs_zevent_len_max)
		zfs_zevent_drain(list_tail(&zevent_list));
	else
		zevent_len_cur++;
}

/*
 * Post a zevent
 */
void
zfs_zevent_post(nvlist_t *nvl, nvlist_t *detector, zevent_cb_t *cb)
{
	int64_t tv_array[2];
	timestruc_t tv;
	size_t nvl_size = 0;
	zevent_t *ev;

	gethrestime(&tv);
	tv_array[0] = tv.tv_sec;
	tv_array[1] = tv.tv_nsec;
	if (nvlist_add_int64_array(nvl, FM_EREPORT_TIME, tv_array, 2)) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
		return;
	}

	(void) nvlist_size(nvl, &nvl_size, NV_ENCODE_NATIVE);
	if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
		atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
		return;
	}

	if (zfs_zevent_console)
		fm_nvprint(nvl);

	ev = zfs_zevent_alloc();
	if (ev == NULL) {
		atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
		return;
	}

	ev->ev_nvl = nvl;
	ev->ev_detector = detector;
	ev->ev_cb = cb;

	mutex_enter(&zevent_lock);
	zfs_zevent_insert(ev);
	cv_broadcast(&zevent_cv);
	mutex_exit(&zevent_lock);
}
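
/*
 * Illustrative posting sequence (a sketch, not code from this module):
 * a producer builds the event and detector nvlists, then hands both to
 * zfs_zevent_post() along with a cleanup callback that is run once the
 * event is drained from the queue.  The callback and class names below
 * are hypothetical.
 *
 *	static void
 *	my_zevent_cb(nvlist_t *nvl, nvlist_t *detector)
 *	{
 *		fm_nvlist_destroy(nvl, FM_NVA_FREE);
 *		fm_nvlist_destroy(detector, FM_NVA_FREE);
 *	}
 *
 *	nvlist_t *nvl = fm_nvlist_create(NULL);
 *	nvlist_t *detector = fm_nvlist_create(NULL);
 *	fm_fmri_zfs_set(detector, ZFS_SCHEME_VERSION0, pool_guid, vdev_guid);
 *	fm_ereport_set(nvl, FM_EREPORT_VERS0, "fs.zfs.example",
 *	    fm_ena_generate(0, FM_ENA_FMT1), detector, NULL);
 *	zfs_zevent_post(nvl, detector, my_zevent_cb);
 */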

static int
zfs_zevent_minor_to_state(minor_t minor, zfs_zevent_t **ze)
{
	*ze = zfsdev_get_state(minor, ZST_ZEVENT);
	if (*ze == NULL)
		return (EBADF);

	return (0);
}

int
zfs_zevent_fd_hold(int fd, minor_t *minorp, zfs_zevent_t **ze)
{
	file_t *fp;
	int error;

	fp = getf(fd);
	if (fp == NULL)
		return (EBADF);

	*minorp = zfsdev_getminor(fp->f_file);
	error = zfs_zevent_minor_to_state(*minorp, ze);

	if (error)
		zfs_zevent_fd_rele(fd);

	return (error);
}

void
zfs_zevent_fd_rele(int fd)
{
	releasef(fd);
}

/*
 * Get the next zevent in the stream and place a copy in 'event'. This
 * may fail with ENOMEM if the encoded nvlist size exceeds the passed
 * 'event_size'. In this case the stream pointer is not advanced and
 * 'event_size' is set to the minimum required buffer size.
 */
int
zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size,
    uint64_t *dropped)
{
	zevent_t *ev;
	size_t size;
	int error = 0;

	mutex_enter(&zevent_lock);
	if (ze->ze_zevent == NULL) {
		/* New stream, start at the beginning/tail */
		ev = list_tail(&zevent_list);
		if (ev == NULL) {
			error = ENOENT;
			goto out;
		}
	} else {
		/*
		 * Existing stream, continue with the next element and
		 * remove ourselves from the wait queue for the previous
		 * element.
		 */
		ev = list_prev(&zevent_list, ze->ze_zevent);
		if (ev == NULL) {
			error = ENOENT;
			goto out;
		}
	}

	VERIFY(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE) == 0);
	if (size > *event_size) {
		*event_size = size;
		error = ENOMEM;
		goto out;
	}

	if (ze->ze_zevent)
		list_remove(&ze->ze_zevent->ev_ze_list, ze);

	ze->ze_zevent = ev;
	list_insert_head(&ev->ev_ze_list, ze);
	nvlist_dup(ev->ev_nvl, event, KM_SLEEP);
	*dropped = ze->ze_dropped;
	ze->ze_dropped = 0;
out:
	mutex_exit(&zevent_lock);

	return (error);
}
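
/*
 * Illustrative consumer loop (a sketch; consume() is hypothetical and
 * error handling is abbreviated): retry after ENOMEM, once 'size' has
 * been raised to the required value, and block in zfs_zevent_wait()
 * when the stream is drained (ENOENT).
 *
 *	uint64_t size = 0, dropped = 0;
 *	nvlist_t *event;
 *	int error;
 *
 *	for (;;) {
 *		error = zfs_zevent_next(ze, &event, &size, &dropped);
 *		if (error == 0) {
 *			consume(event, dropped);
 *			nvlist_free(event);
 *		} else if (error == ENOENT) {
 *			if (zfs_zevent_wait(ze) != 0)
 *				break;
 *		} else if (error != ENOMEM) {
 *			break;
 *		}
 *	}
 */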

int
zfs_zevent_wait(zfs_zevent_t *ze)
{
	int error = 0;

	mutex_enter(&zevent_lock);

	if (zevent_flags & ZEVENT_SHUTDOWN) {
		error = ESHUTDOWN;
		goto out;
	}

	zevent_waiters++;
	cv_wait_interruptible(&zevent_cv, &zevent_lock);
	if (issig(JUSTLOOKING))
		error = EINTR;

	zevent_waiters--;
out:
	mutex_exit(&zevent_lock);

	return (error);
}

void
zfs_zevent_init(zfs_zevent_t **zep)
{
	zfs_zevent_t *ze;

	ze = *zep = kmem_zalloc(sizeof (zfs_zevent_t), KM_SLEEP);
	list_link_init(&ze->ze_node);
}

void
zfs_zevent_destroy(zfs_zevent_t *ze)
{
	mutex_enter(&zevent_lock);
	if (ze->ze_zevent)
		list_remove(&ze->ze_zevent->ev_ze_list, ze);
	mutex_exit(&zevent_lock);

	kmem_free(ze, sizeof (zfs_zevent_t));
}
#endif /* _KERNEL */

/*
 * Wrappers for FM nvlist allocators
 */
/* ARGSUSED */
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
	return (kmem_zalloc(size, KM_PUSHPAGE));
}

/* ARGSUSED */
static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
	kmem_free(buf, size);
}

const nv_alloc_ops_t fm_mem_alloc_ops = {
	NULL,
	NULL,
	i_fm_alloc,
	i_fm_free,
	NULL
};

/*
 * Create and initialize a new nv_alloc_t for a fixed buffer, buf. A pointer
 * to the newly allocated nv_alloc_t structure is returned upon success or NULL
 * is returned to indicate that the nv_alloc structure could not be created.
 */
nv_alloc_t *
fm_nva_xcreate(char *buf, size_t bufsz)
{
	nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

	if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
		kmem_free(nvhdl, sizeof (nv_alloc_t));
		return (NULL);
	}

	return (nvhdl);
}

/*
 * Destroy a previously allocated nv_alloc structure. The fixed buffer
 * associated with nva must be freed by the caller.
 */
void
fm_nva_xdestroy(nv_alloc_t *nva)
{
	nv_alloc_fini(nva);
	kmem_free(nva, sizeof (nv_alloc_t));
}

/*
 * Create a new nv list. A pointer to a new nv list structure is returned
 * upon success or NULL is returned to indicate that the structure could
 * not be created. The newly created nv list is created and managed by the
 * operations installed in nva. If nva is NULL, the default FMA nva
 * operations are installed and used.
 *
 * When called from the kernel and nva == NULL, this function must be called
 * from passive kernel context with no locks held that can prevent a
 * sleeping memory allocation from occurring. Otherwise, this function may
 * be called from other kernel contexts as long as a valid nva created via
 * fm_nva_xcreate() is supplied.
 */
nvlist_t *
fm_nvlist_create(nv_alloc_t *nva)
{
	int hdl_alloced = 0;
	nvlist_t *nvl;
	nv_alloc_t *nvhdl;

	if (nva == NULL) {
		nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_PUSHPAGE);

		if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
			kmem_free(nvhdl, sizeof (nv_alloc_t));
			return (NULL);
		}
		hdl_alloced = 1;
	} else {
		nvhdl = nva;
	}

	if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
		if (hdl_alloced) {
			nv_alloc_fini(nvhdl);
			kmem_free(nvhdl, sizeof (nv_alloc_t));
		}
		return (NULL);
	}

	return (nvl);
}
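
/*
 * Illustrative fixed-buffer usage (a sketch): a caller in a constrained
 * context can wrap a preallocated buffer with fm_nva_xcreate() and build
 * nvlists that never touch the kernel memory allocator.  The buffer size
 * below is only a plausible choice.
 *
 *	static char ebuf[ERPT_DATA_SZ];
 *	nv_alloc_t *nva = fm_nva_xcreate(ebuf, sizeof (ebuf));
 *	nvlist_t *nvl = fm_nvlist_create(nva);
 *	...
 *	fm_nvlist_destroy(nvl, FM_NVA_RETAIN);	retain nva for reuse
 *	fm_nva_xdestroy(nva);			caller still owns ebuf
 */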

/*
 * Destroy a previously allocated nvlist structure. flag indicates whether
 * or not the associated nva structure should be freed (FM_NVA_FREE) or
 * retained (FM_NVA_RETAIN). Retaining the nv alloc structure allows
 * it to be re-used for future nvlist creation operations.
 */
void
fm_nvlist_destroy(nvlist_t *nvl, int flag)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);

	nvlist_free(nvl);

	if (nva != NULL) {
		if (flag == FM_NVA_FREE)
			fm_nva_xdestroy(nva);
	}
}

int
i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
{
	int nelem, ret = 0;
	data_type_t type;

	while (ret == 0 && name != NULL) {
		type = va_arg(ap, data_type_t);
		switch (type) {
		case DATA_TYPE_BYTE:
			ret = nvlist_add_byte(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_BYTE_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_byte_array(payload, name,
			    va_arg(ap, uchar_t *), nelem);
			break;
		case DATA_TYPE_BOOLEAN_VALUE:
			ret = nvlist_add_boolean_value(payload, name,
			    va_arg(ap, boolean_t));
			break;
		case DATA_TYPE_BOOLEAN_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_boolean_array(payload, name,
			    va_arg(ap, boolean_t *), nelem);
			break;
		case DATA_TYPE_INT8:
			ret = nvlist_add_int8(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int8_array(payload, name,
			    va_arg(ap, int8_t *), nelem);
			break;
		case DATA_TYPE_UINT8:
			ret = nvlist_add_uint8(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint8_array(payload, name,
			    va_arg(ap, uint8_t *), nelem);
			break;
		case DATA_TYPE_INT16:
			ret = nvlist_add_int16(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int16_array(payload, name,
			    va_arg(ap, int16_t *), nelem);
			break;
		case DATA_TYPE_UINT16:
			ret = nvlist_add_uint16(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint16_array(payload, name,
			    va_arg(ap, uint16_t *), nelem);
			break;
		case DATA_TYPE_INT32:
			ret = nvlist_add_int32(payload, name,
			    va_arg(ap, int32_t));
			break;
		case DATA_TYPE_INT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int32_array(payload, name,
			    va_arg(ap, int32_t *), nelem);
			break;
		case DATA_TYPE_UINT32:
			ret = nvlist_add_uint32(payload, name,
			    va_arg(ap, uint32_t));
			break;
		case DATA_TYPE_UINT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint32_array(payload, name,
			    va_arg(ap, uint32_t *), nelem);
			break;
		case DATA_TYPE_INT64:
			ret = nvlist_add_int64(payload, name,
			    va_arg(ap, int64_t));
			break;
		case DATA_TYPE_INT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int64_array(payload, name,
			    va_arg(ap, int64_t *), nelem);
			break;
		case DATA_TYPE_UINT64:
			ret = nvlist_add_uint64(payload, name,
			    va_arg(ap, uint64_t));
			break;
		case DATA_TYPE_UINT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint64_array(payload, name,
			    va_arg(ap, uint64_t *), nelem);
			break;
		case DATA_TYPE_STRING:
			ret = nvlist_add_string(payload, name,
			    va_arg(ap, char *));
			break;
		case DATA_TYPE_STRING_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_string_array(payload, name,
			    va_arg(ap, char **), nelem);
			break;
		case DATA_TYPE_NVLIST:
			ret = nvlist_add_nvlist(payload, name,
			    va_arg(ap, nvlist_t *));
			break;
		case DATA_TYPE_NVLIST_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_nvlist_array(payload, name,
			    va_arg(ap, nvlist_t **), nelem);
			break;
		default:
			ret = EINVAL;
		}

		name = va_arg(ap, char *);
	}
	return (ret);
}

void
fm_payload_set(nvlist_t *payload, ...)
{
	int ret;
	const char *name;
	va_list ap;

	va_start(ap, payload);
	name = va_arg(ap, char *);
	ret = i_fm_payload_set(payload, name, ap);
	va_end(ap);

	if (ret)
		atomic_add_64(
		    &erpt_kstat_data.payload_set_failed.value.ui64, 1);
}

/*
 * Set-up and validate the members of an ereport event according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	class			string		ereport
 *	version			uint8_t		0
 *	ena			uint64_t	<ena>
 *	detector		nvlist_t	<detector>
 *	ereport-payload		nvlist_t	<var args>
 *
 * We don't actually add a 'version' member to the payload. Really,
 * the version quoted to us by our caller is that of the category 1
 * "ereport" event class (and we require FM_EREPORT_VERS0) but
 * the payload version of the actual leaf class event under construction
 * may be something else. Callers should supply a version in the varargs,
 * or (better) we could take two version arguments - one for the
 * ereport category 1 classification (expect FM_EREPORT_VERS0) and one
 * for the leaf class.
 */
void
fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
    uint64_t ena, const nvlist_t *detector, ...)
{
	char ereport_class[FM_MAX_CLASS];
	const char *name;
	va_list ap;
	int ret;

	if (version != FM_EREPORT_VERS0) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
		return;
	}

	(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
	    FM_EREPORT_CLASS, erpt_class);
	if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
	}

	if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
	    (nvlist_t *)detector) != 0) {
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
	}

	va_start(ap, detector);
	name = va_arg(ap, const char *);
	ret = i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (ret)
		atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
}
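
/*
 * Illustrative call (a sketch; the class string and payload names are
 * hypothetical).  The varargs are (name, type, value) triples terminated
 * by a NULL name; fm_payload_set() can append further members after
 * fm_ereport_set() returns.
 *
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "io.example.fault",
 *	    fm_ena_generate(0, FM_ENA_FMT1), detector,
 *	    "request-size", DATA_TYPE_UINT64, (uint64_t)size,
 *	    "retries", DATA_TYPE_UINT32, (uint32_t)retries,
 *	    NULL);
 */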

/*
 * Set-up and validate the members of an hc fmri according to:
 *
 *	Member name		Type		Value
 *	===================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	hc-name			string		<name>
 *	hc-id			string		<id>
 *
 * Note that auth and hc-id are optional members.
 */

#define	HC_MAXPAIRS	20
#define	HC_MAXNAMELEN	50

static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
	if (version != FM_HC_SCHEME_VERSION) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return (0);
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
	    nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return (0);
	}

	if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return (0);
	}

	return (1);
}

void
fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	va_list ap;
	int i;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = 0; i < npairs; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];

		(void) snprintf(idstr, sizeof (idstr), "%u", id);

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}
	va_end(ap);

	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);

	for (i = 0; i < npairs; i++)
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}
}
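
/*
 * Illustrative call (a sketch; the pair names are hypothetical).  The
 * varargs are npairs (name, id) pairs, ordered from the top of the
 * hardware tree down, yielding a path like motherboard=0/chip=1/cpu=2:
 *
 *	fm_fmri_hc_set(fmri, FM_HC_SCHEME_VERSION, auth, NULL, 3,
 *	    "motherboard", 0, "chip", 1, "cpu", 2);
 */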

void
fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	nvlist_t **hcl;
	uint_t n;
	int i, j;
	va_list ap;
	char *hcname, *hcid;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	/*
	 * copy the bboard nvpairs to the pairs array
	 */
	if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
	    != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	for (i = 0; i < n; i++) {
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
		    &hcname) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}
	}

	/*
	 * create the pairs from passed in pairs
	 */
	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = n; i < npairs + n; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];
		(void) snprintf(idstr, sizeof (idstr), "%u", id);
		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}
	}
	va_end(ap);

	/*
	 * Create the fmri hc list
	 */
	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs,
	    npairs + n) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	for (i = 0; i < npairs + n; i++) {
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
	}

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
			return;
		}
	}
}

/*
 * Set-up and validate the members of a dev fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	devpath			string		<devpath>
 *	[devid]			string		<devid>
 *	[target-port-l0id]	string		<target-port-lun0-id>
 *
 * Note that auth and devid are optional members.
 */
void
fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
    const char *devpath, const char *devid, const char *tpl0)
{
	int err = 0;

	if (version != DEV_SCHEME_VERSION0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
	err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);

	if (auth != NULL) {
		err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth);
	}

	err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);

	if (devid != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);

	if (tpl0 != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);

	if (err)
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);

}

/*
 * Set-up and validate the members of a cpu fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	cpuid			uint32_t	<cpu_id>
 *	cpumask			uint8_t		<cpu_mask>
 *	serial			uint64_t	<serial_id>
 *
 * Note that auth, cpumask, serial are optional members.
 */
void
fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
    uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
{
	uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;

	if (version < CPU_SCHEME_VERSION1) {
		atomic_add_64(failedp, 1);
		return;
	}

	if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
		atomic_add_64(failedp, 1);
		return;
	}

	if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
	    FM_FMRI_SCHEME_CPU) != 0) {
		atomic_add_64(failedp, 1);
		return;
	}

	if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0)
		atomic_add_64(failedp, 1);

	if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
		atomic_add_64(failedp, 1);

	if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
	    *cpu_maskp) != 0)
		atomic_add_64(failedp, 1);

	if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
	    FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
		atomic_add_64(failedp, 1);
}

/*
 * Set-up and validate the members of a mem fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>		[optional]
 *	unum			string		<unum>
 *	serial			string		<serial>	[optional*]
 *	offset			uint64_t	<offset>	[optional]
 *
 *	* serial is required if offset is present
 */
void
fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    const char *unum, const char *serial, uint64_t offset)
{
	if (version != MEM_SCHEME_VERSION0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (!serial && (offset != (uint64_t)-1)) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (auth != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}

	if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
	}

	if (serial != NULL) {
		if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
		    (char **)&serial, 1) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
		if (offset != (uint64_t)-1) {
			if (nvlist_add_uint64(fmri, FM_FMRI_MEM_OFFSET,
			    offset) != 0) {
				atomic_add_64(&erpt_kstat_data.
				    fmri_set_failed.value.ui64, 1);
			}
		}
	}
}

void
fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
    uint64_t vdev_guid)
{
	if (version != ZFS_SCHEME_VERSION0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		return;
	}

	if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
		atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
	}

	if (vdev_guid != 0) {
		if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
			atomic_add_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
		}
	}
}

uint64_t
fm_ena_increment(uint64_t ena)
{
	uint64_t new_ena;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
		break;
	case FM_ENA_FMT2:
		new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
		break;
	default:
		new_ena = 0;
	}

	return (new_ena);
}

uint64_t
fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
{
	uint64_t ena = 0;

	switch (format) {
	case FM_ENA_FMT1:
		if (timestamp) {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((timestamp << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		} else {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((gethrtime() << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		}
		break;
	case FM_ENA_FMT2:
		ena = (uint64_t)((format & ENA_FORMAT_MASK) |
		    ((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
		break;
	default:
		break;
	}

	return (ena);
}

uint64_t
fm_ena_generate(uint64_t timestamp, uchar_t format)
{
	uint64_t ena;

	kpreempt_disable();
	ena = fm_ena_generate_cpu(timestamp, getcpuid(), format);
	kpreempt_enable();

	return (ena);
}
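
/*
 * Illustrative ENA round trip (a sketch): generate a format 1 ENA for
 * the current CPU and time, then recover the individual fields with the
 * accessors below.
 *
 *	uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);
 *	uchar_t fmt = fm_ena_format_get(ena);		FM_ENA_FMT1
 *	uint64_t when = fm_ena_time_get(ena);
 *	uint64_t gen = fm_ena_generation_get(ena);
 *	ena = fm_ena_increment(ena);			bumps the generation
 */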

uint64_t
fm_ena_generation_get(uint64_t ena)
{
	uint64_t gen;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
		break;
	case FM_ENA_FMT2:
		gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
		break;
	default:
		gen = 0;
		break;
	}

	return (gen);
}

uchar_t
fm_ena_format_get(uint64_t ena)
{
	return (ENA_FORMAT(ena));
}

uint64_t
fm_ena_id_get(uint64_t ena)
{
	uint64_t id;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
		break;
	case FM_ENA_FMT2:
		id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
		break;
	default:
		id = 0;
	}

	return (id);
}

uint64_t
fm_ena_time_get(uint64_t ena)
{
	uint64_t time;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
		break;
	case FM_ENA_FMT2:
		time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
		break;
	default:
		time = 0;
	}

	return (time);
}

#ifdef _KERNEL
void
fm_init(void)
{
	zevent_len_cur = 0;
	zevent_flags = 0;

	if (zfs_zevent_len_max == 0)
		zfs_zevent_len_max = ERPT_MAX_ERRS * MAX(max_ncpus, 4);

	/* Initialize zevent allocation and generation kstats */
	fm_ksp = kstat_create("zfs", 0, "fm", "misc", KSTAT_TYPE_NAMED,
	    sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (fm_ksp != NULL) {
		fm_ksp->ks_data = &erpt_kstat_data;
		kstat_install(fm_ksp);
	} else {
		cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
	}

	mutex_init(&zevent_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zevent_list, sizeof (zevent_t),
	    offsetof(zevent_t, ev_node));
	cv_init(&zevent_cv, NULL, CV_DEFAULT, NULL);
}

void
fm_fini(void)
{
	int count;

	zfs_zevent_drain_all(&count);

	mutex_enter(&zevent_lock);
	cv_broadcast(&zevent_cv);

	zevent_flags |= ZEVENT_SHUTDOWN;
	while (zevent_waiters > 0) {
		mutex_exit(&zevent_lock);
		schedule();
		mutex_enter(&zevent_lock);
	}
	mutex_exit(&zevent_lock);

	cv_destroy(&zevent_cv);
	list_destroy(&zevent_list);
	mutex_destroy(&zevent_lock);

	if (fm_ksp != NULL) {
		kstat_delete(fm_ksp);
		fm_ksp = NULL;
	}
}

module_param(zfs_zevent_len_max, int, 0644);
MODULE_PARM_DESC(zfs_zevent_len_max, "Max event queue length");

module_param(zfs_zevent_cols, int, 0644);
MODULE_PARM_DESC(zfs_zevent_cols, "Max event column width");

module_param(zfs_zevent_console, int, 0644);
MODULE_PARM_DESC(zfs_zevent_console, "Log events to the console");

#endif /* _KERNEL */