1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
 22 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25/*
26 * Fault Management Architecture (FMA) Resource and Protocol Support
27 *
28 * The routines contained herein provide services to support kernel subsystems
29 * in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
30 *
31 * Name-Value Pair Lists
32 *
33 * The embodiment of an FMA protocol element (event, fmri or authority) is a
 34 * name-value pair list (nvlist_t). FMA-specific nvlist constructor and
35 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
36 * to create an nvpair list using custom allocators. Callers may choose to
37 * allocate either from the kernel memory allocator, or from a preallocated
38 * buffer, useful in constrained contexts like high-level interrupt routines.
39 *
40 * Protocol Event and FMRI Construction
41 *
42 * Convenience routines are provided to construct nvlist events according to
43 * the FMA Event Protocol and Naming Schema specification for ereports and
44 * FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
45 *
46 * ENA Manipulation
47 *
48 * Routines to generate ENA formats 0, 1 and 2 are available as well as
49 * routines to increment formats 1 and 2. Individual fields within the
50 * ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
51 * fm_ena_format_get() and fm_ena_gen_get().
52 */
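/*
 * Illustrative sketch only (not part of this module): a typical in-kernel
 * producer strings the interfaces below together roughly as follows. The
 * class string, guids and cleanup callback name are hypothetical placeholders.
 *
 *	nvlist_t *ereport = fm_nvlist_create(NULL);
 *	nvlist_t *detector = fm_nvlist_create(NULL);
 *	uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);
 *
 *	fm_fmri_zfs_set(detector, ZFS_SCHEME_VERSION0, pool_guid, vdev_guid);
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "fs.zfs.example", ena,
 *	    detector, NULL);
 *	zfs_zevent_post(ereport, detector, example_cleanup_cb);
 */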
53
54#include <sys/types.h>
55#include <sys/time.h>
 56#include <sys/list.h>
57#include <sys/nvpair.h>
58#include <sys/cmn_err.h>
 59#include <sys/sysmacros.h>
 60#include <sys/compress.h>
61#include <sys/sunddi.h>
62#include <sys/systeminfo.h>
63#include <sys/fm/util.h>
64#include <sys/fm/protocol.h>
65#include <sys/kstat.h>
66#include <sys/zfs_context.h>
67#ifdef _KERNEL
68#include <sys/atomic.h>
69#include <sys/condvar.h>
70#include <sys/cpuvar.h>
71#include <sys/systm.h>
72#include <sys/dumphdr.h>
73#include <sys/cpuvar.h>
74#include <sys/console.h>
75#include <sys/kobj.h>
76#include <sys/time.h>
77#include <sys/zfs_ioctl.h>
 78
79int zfs_zevent_len_max = 0;
80int zfs_zevent_cols = 80;
81int zfs_zevent_console = 0;
 82
83static int zevent_len_cur = 0;
84static int zevent_waiters = 0;
85static int zevent_flags = 0;
 86
87static kmutex_t zevent_lock;
88static list_t zevent_list;
89static kcondvar_t zevent_cv;
90#endif /* _KERNEL */
 91
92extern void fastreboot_disable_highpil(void);
93
 94/*
 95 * Common fault management kstats to record event generation failures
96 */
97
98struct erpt_kstat {
99 kstat_named_t erpt_dropped; /* num erpts dropped on post */
100 kstat_named_t erpt_set_failed; /* num erpt set failures */
101 kstat_named_t fmri_set_failed; /* num fmri set failures */
102 kstat_named_t payload_set_failed; /* num payload set failures */
103};
104
105static struct erpt_kstat erpt_kstat_data = {
106 { "erpt-dropped", KSTAT_DATA_UINT64 },
107 { "erpt-set-failed", KSTAT_DATA_UINT64 },
108 { "fmri-set-failed", KSTAT_DATA_UINT64 },
109 { "payload-set-failed", KSTAT_DATA_UINT64 }
110};
111
 112kstat_t *fm_ksp;
 113
 114#ifdef _KERNEL
115
116/*
117 * Formatting utility function for fm_nvprintr. We attempt to wrap chunks of
118 * output so they aren't split across console lines, and return the end column.
119 */
120/*PRINTFLIKE4*/
121static int
122fm_printf(int depth, int c, int cols, const char *format, ...)
123{
124 va_list ap;
125 int width;
126 char c1;
127
128 va_start(ap, format);
129 width = vsnprintf(&c1, sizeof (c1), format, ap);
130 va_end(ap);
131
132 if (c + width >= cols) {
 133 console_printf("\n");
134 c = 0;
135 if (format[0] != ' ' && depth > 0) {
136 console_printf(" ");
137 c++;
138 }
139 }
140
141 va_start(ap, format);
142 console_vprintf(format, ap);
143 va_end(ap);
144
145 return ((c + width) % cols);
146}
147
148/*
149 * Recursively print a nvlist in the specified column width and return the
150 * column we end up in. This function is called recursively by fm_nvprint(),
151 * below. We generically format the entire nvpair using hexadecimal
152 * integers and strings, and elide any integer arrays. Arrays are basically
153 * used for cache dumps right now, so we suppress them so as not to overwhelm
154 * the amount of console output we produce at panic time. This can be further
155 * enhanced as FMA technology grows based upon the needs of consumers. All
156 * FMA telemetry is logged using the dump device transport, so the console
157 * output serves only as a fallback in case this procedure is unsuccessful.
158 */
159static int
160fm_nvprintr(nvlist_t *nvl, int d, int c, int cols)
161{
162 nvpair_t *nvp;
163
164 for (nvp = nvlist_next_nvpair(nvl, NULL);
165 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
166
167 data_type_t type = nvpair_type(nvp);
168 const char *name = nvpair_name(nvp);
169
170 boolean_t b;
171 uint8_t i8;
172 uint16_t i16;
173 uint32_t i32;
174 uint64_t i64;
175 char *str;
176 nvlist_t *cnv;
177
178 if (strcmp(name, FM_CLASS) == 0)
179 continue; /* already printed by caller */
180
181 c = fm_printf(d, c, cols, " %s=", name);
182
183 switch (type) {
184 case DATA_TYPE_BOOLEAN:
185 c = fm_printf(d + 1, c, cols, " 1");
186 break;
187
188 case DATA_TYPE_BOOLEAN_VALUE:
189 (void) nvpair_value_boolean_value(nvp, &b);
190 c = fm_printf(d + 1, c, cols, b ? "1" : "0");
191 break;
192
193 case DATA_TYPE_BYTE:
194 (void) nvpair_value_byte(nvp, &i8);
 195 c = fm_printf(d + 1, c, cols, "0x%x", i8);
196 break;
197
198 case DATA_TYPE_INT8:
199 (void) nvpair_value_int8(nvp, (void *)&i8);
 200 c = fm_printf(d + 1, c, cols, "0x%x", i8);
201 break;
202
203 case DATA_TYPE_UINT8:
204 (void) nvpair_value_uint8(nvp, &i8);
 205 c = fm_printf(d + 1, c, cols, "0x%x", i8);
206 break;
207
208 case DATA_TYPE_INT16:
209 (void) nvpair_value_int16(nvp, (void *)&i16);
 210 c = fm_printf(d + 1, c, cols, "0x%x", i16);
211 break;
212
213 case DATA_TYPE_UINT16:
214 (void) nvpair_value_uint16(nvp, &i16);
 215 c = fm_printf(d + 1, c, cols, "0x%x", i16);
216 break;
217
218 case DATA_TYPE_INT32:
219 (void) nvpair_value_int32(nvp, (void *)&i32);
 220 c = fm_printf(d + 1, c, cols, "0x%x", i32);
221 break;
222
223 case DATA_TYPE_UINT32:
224 (void) nvpair_value_uint32(nvp, &i32);
 225 c = fm_printf(d + 1, c, cols, "0x%x", i32);
226 break;
227
228 case DATA_TYPE_INT64:
229 (void) nvpair_value_int64(nvp, (void *)&i64);
 230 c = fm_printf(d + 1, c, cols, "0x%llx",
231 (u_longlong_t)i64);
232 break;
233
234 case DATA_TYPE_UINT64:
235 (void) nvpair_value_uint64(nvp, &i64);
 236 c = fm_printf(d + 1, c, cols, "0x%llx",
237 (u_longlong_t)i64);
238 break;
239
240 case DATA_TYPE_HRTIME:
241 (void) nvpair_value_hrtime(nvp, (void *)&i64);
 242 c = fm_printf(d + 1, c, cols, "0x%llx",
243 (u_longlong_t)i64);
244 break;
245
246 case DATA_TYPE_STRING:
247 (void) nvpair_value_string(nvp, &str);
248 c = fm_printf(d + 1, c, cols, "\"%s\"",
249 str ? str : "<NULL>");
250 break;
251
252 case DATA_TYPE_NVLIST:
253 c = fm_printf(d + 1, c, cols, "[");
254 (void) nvpair_value_nvlist(nvp, &cnv);
255 c = fm_nvprintr(cnv, d + 1, c, cols);
256 c = fm_printf(d + 1, c, cols, " ]");
257 break;
258
259 case DATA_TYPE_NVLIST_ARRAY: {
260 nvlist_t **val;
261 uint_t i, nelem;
262
263 c = fm_printf(d + 1, c, cols, "[");
264 (void) nvpair_value_nvlist_array(nvp, &val, &nelem);
265 for (i = 0; i < nelem; i++) {
266 c = fm_nvprintr(val[i], d + 1, c, cols);
267 }
268 c = fm_printf(d + 1, c, cols, " ]");
269 }
270 break;
271
272 case DATA_TYPE_INT8_ARRAY: {
273 int8_t *val;
274 uint_t i, nelem;
275
276 c = fm_printf(d + 1, c, cols, "[ ");
277 (void) nvpair_value_int8_array(nvp, &val, &nelem);
278 for (i = 0; i < nelem; i++)
279 c = fm_printf(d + 1, c, cols, "0x%llx ",
280 (u_longlong_t)val[i]);
281
282 c = fm_printf(d + 1, c, cols, "]");
283 break;
284 }
285
286 case DATA_TYPE_UINT8_ARRAY: {
287 uint8_t *val;
288 uint_t i, nelem;
289
290 c = fm_printf(d + 1, c, cols, "[ ");
291 (void) nvpair_value_uint8_array(nvp, &val, &nelem);
292 for (i = 0; i < nelem; i++)
293 c = fm_printf(d + 1, c, cols, "0x%llx ",
294 (u_longlong_t)val[i]);
295
296 c = fm_printf(d + 1, c, cols, "]");
297 break;
298 }
299
300 case DATA_TYPE_INT16_ARRAY: {
301 int16_t *val;
302 uint_t i, nelem;
303
304 c = fm_printf(d + 1, c, cols, "[ ");
305 (void) nvpair_value_int16_array(nvp, &val, &nelem);
306 for (i = 0; i < nelem; i++)
307 c = fm_printf(d + 1, c, cols, "0x%llx ",
308 (u_longlong_t)val[i]);
309
310 c = fm_printf(d + 1, c, cols, "]");
311 break;
312 }
313
314 case DATA_TYPE_UINT16_ARRAY: {
315 uint16_t *val;
316 uint_t i, nelem;
317
318 c = fm_printf(d + 1, c, cols, "[ ");
319 (void) nvpair_value_uint16_array(nvp, &val, &nelem);
320 for (i = 0; i < nelem; i++)
321 c = fm_printf(d + 1, c, cols, "0x%llx ",
322 (u_longlong_t)val[i]);
323
324 c = fm_printf(d + 1, c, cols, "]");
325 break;
326 }
327
328 case DATA_TYPE_INT32_ARRAY: {
329 int32_t *val;
330 uint_t i, nelem;
331
332 c = fm_printf(d + 1, c, cols, "[ ");
333 (void) nvpair_value_int32_array(nvp, &val, &nelem);
334 for (i = 0; i < nelem; i++)
335 c = fm_printf(d + 1, c, cols, "0x%llx ",
336 (u_longlong_t)val[i]);
337
338 c = fm_printf(d + 1, c, cols, "]");
339 break;
340 }
341
342 case DATA_TYPE_UINT32_ARRAY: {
343 uint32_t *val;
344 uint_t i, nelem;
345
346 c = fm_printf(d + 1, c, cols, "[ ");
347 (void) nvpair_value_uint32_array(nvp, &val, &nelem);
348 for (i = 0; i < nelem; i++)
349 c = fm_printf(d + 1, c, cols, "0x%llx ",
350 (u_longlong_t)val[i]);
351
352 c = fm_printf(d + 1, c, cols, "]");
353 break;
354 }
355
356 case DATA_TYPE_INT64_ARRAY: {
357 int64_t *val;
358 uint_t i, nelem;
359
360 c = fm_printf(d + 1, c, cols, "[ ");
361 (void) nvpair_value_int64_array(nvp, &val, &nelem);
362 for (i = 0; i < nelem; i++)
363 c = fm_printf(d + 1, c, cols, "0x%llx ",
364 (u_longlong_t)val[i]);
365
366 c = fm_printf(d + 1, c, cols, "]");
367 break;
368 }
369
370 case DATA_TYPE_UINT64_ARRAY: {
371 uint64_t *val;
372 uint_t i, nelem;
373
374 c = fm_printf(d + 1, c, cols, "[ ");
375 (void) nvpair_value_uint64_array(nvp, &val, &nelem);
376 for (i = 0; i < nelem; i++)
377 c = fm_printf(d + 1, c, cols, "0x%llx ",
378 (u_longlong_t)val[i]);
379
380 c = fm_printf(d + 1, c, cols, "]");
381 break;
382 }
383
384 case DATA_TYPE_STRING_ARRAY:
385 case DATA_TYPE_BOOLEAN_ARRAY:
386 case DATA_TYPE_BYTE_ARRAY:
387 c = fm_printf(d + 1, c, cols, "[...]");
388 break;
 389
390 case DATA_TYPE_UNKNOWN:
391 c = fm_printf(d + 1, c, cols, "<unknown>");
392 break;
393 }
394 }
395
396 return (c);
397}
398
399void
400fm_nvprint(nvlist_t *nvl)
401{
402 char *class;
403 int c = 0;
404
 405 console_printf("\n");
406
407 if (nvlist_lookup_string(nvl, FM_CLASS, &class) == 0)
 408 c = fm_printf(0, c, zfs_zevent_cols, "%s", class);
 409
 410 if (fm_nvprintr(nvl, 0, c, zfs_zevent_cols) != 0)
411 console_printf("\n");
412
413 console_printf("\n");
414}
415
416static zevent_t *
417zfs_zevent_alloc(void)
418{
419 zevent_t *ev;
420
421 ev = kmem_zalloc(sizeof(zevent_t), KM_SLEEP);
422 if (ev == NULL)
423 return NULL;
424
425 list_create(&ev->ev_ze_list, sizeof(zfs_zevent_t),
426 offsetof(zfs_zevent_t, ze_node));
427 list_link_init(&ev->ev_node);
428
429 return ev;
430}
431
432static void
433zfs_zevent_free(zevent_t *ev)
434{
435 /* Run provided cleanup callback */
436 ev->ev_cb(ev->ev_nvl, ev->ev_detector);
437
438 list_destroy(&ev->ev_ze_list);
439 kmem_free(ev, sizeof(zevent_t));
440}
441
442static void
443zfs_zevent_drain(zevent_t *ev)
444{
445 zfs_zevent_t *ze;
446
447 ASSERT(MUTEX_HELD(&zevent_lock));
448 list_remove(&zevent_list, ev);
449
450 /* Remove references to this event in all private file data */
451 while ((ze = list_head(&ev->ev_ze_list)) != NULL) {
452 list_remove(&ev->ev_ze_list, ze);
453 ze->ze_zevent = NULL;
454 ze->ze_dropped++;
455 }
456
457 zfs_zevent_free(ev);
458}
459
 460void
 461zfs_zevent_drain_all(int *count)
 462{
 463 zevent_t *ev;
 464
465 mutex_enter(&zevent_lock);
466 while ((ev = list_head(&zevent_list)) != NULL)
467 zfs_zevent_drain(ev);
468
469 *count = zevent_len_cur;
470 zevent_len_cur = 0;
471 mutex_exit(&zevent_lock);
472}
473
 474/*
475 * New zevents are inserted at the head. If the maximum queue
 476 * length is exceeded, a zevent will be drained from the tail.
 477 * As part of this, any user space processes which currently have
478 * a reference to this zevent_t in their private data will have
479 * this reference set to NULL.
 480 */
481static void
482zfs_zevent_insert(zevent_t *ev)
 483{
484 mutex_enter(&zevent_lock);
485 list_insert_head(&zevent_list, ev);
 486 if (zevent_len_cur >= zfs_zevent_len_max)
 487 zfs_zevent_drain(list_tail(&zevent_list));
 488 else
489 zevent_len_cur++;
490
491 mutex_exit(&zevent_lock);
492}
493
 494/*
 495 * Post a zevent
496 */
497void
 498zfs_zevent_post(nvlist_t *nvl, nvlist_t *detector, zevent_cb_t *cb)
 499{
500 int64_t tv_array[2];
501 timestruc_t tv;
502 size_t nvl_size = 0;
503 zevent_t *ev;
 504
505 gethrestime(&tv);
506 tv_array[0] = tv.tv_sec;
507 tv_array[1] = tv.tv_nsec;
508 if (nvlist_add_int64_array(nvl, FM_EREPORT_TIME, tv_array, 2)) {
509 atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
510 return;
511 }
 512
513 (void) nvlist_size(nvl, &nvl_size, NV_ENCODE_NATIVE);
514 if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
515 atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
516 return;
517 }
518
 519 if (zfs_zevent_console)
 520 fm_nvprint(nvl);
 521
522 ev = zfs_zevent_alloc();
523 if (ev == NULL) {
524 atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
525 return;
526 }
 527
528 ev->ev_nvl = nvl;
529 ev->ev_detector = detector;
530 ev->ev_cb = cb;
531 zfs_zevent_insert(ev);
532 cv_broadcast(&zevent_cv);
533}
 534
535static int
536zfs_zevent_minor_to_state(minor_t minor, zfs_zevent_t **ze)
537{
538 *ze = zfsdev_get_state(minor, ZST_ZEVENT);
539 if (*ze == NULL)
540 return (EBADF);
 541
542 return (0);
543}
 544
545int
546zfs_zevent_fd_hold(int fd, minor_t *minorp, zfs_zevent_t **ze)
547{
548 file_t *fp;
549 int error;
550
551 fp = getf(fd);
552 if (fp == NULL)
553 return (EBADF);
554
555 *minorp = zfsdev_getminor(fp->f_file);
556 error = zfs_zevent_minor_to_state(*minorp, ze);
557
558 if (error)
559 zfs_zevent_fd_rele(fd);
560
561 return (error);
562}
563
564void
565zfs_zevent_fd_rele(int fd)
566{
567 releasef(fd);
568}
569
570/*
571 * Get the next zevent in the stream and place a copy in 'event'. This
572 * may fail with ENOMEM if the encoded nvlist size exceeds the passed
 573 * 'event_size'. In this case the stream pointer is not advanced and
 574 * 'event_size' is set to the minimum required buffer size.
 575 */
 576int
577zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size,
578 uint64_t *dropped)
 579{
 580 zevent_t *ev;
581 size_t size;
582 int error = 0;
583
584 mutex_enter(&zevent_lock);
585 if (ze->ze_zevent == NULL) {
586 /* New stream start at the beginning/tail */
587 ev = list_tail(&zevent_list);
588 if (ev == NULL) {
589 error = ENOENT;
590 goto out;
591 }
 592 } else {
 593 /* Existing stream, continue with the next element and remove
 594 * ourselves from the wait queue for the previous element */
595 ev = list_prev(&zevent_list, ze->ze_zevent);
596 if (ev == NULL) {
597 error = ENOENT;
598 goto out;
599 }
 600 }
 601
602 VERIFY(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE) == 0);
603 if (size > *event_size) {
604 *event_size = size;
605 error = ENOMEM;
606 goto out;
607 }
608
609 if (ze->ze_zevent)
610 list_remove(&ze->ze_zevent->ev_ze_list, ze);
611
612 ze->ze_zevent = ev;
613 list_insert_head(&ev->ev_ze_list, ze);
614 nvlist_dup(ev->ev_nvl, event, KM_SLEEP);
615 *dropped = ze->ze_dropped;
616 ze->ze_dropped = 0;
617out:
618 mutex_exit(&zevent_lock);
 619
620 return error;
621}
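/*
 * Illustrative consumer-side sketch (buffer handling is hypothetical): the
 * ENOMEM contract documented above is meant to be used as a resize-and-retry
 * loop by whoever drains the zevent stream:
 *
 *	uint64_t size = ERPT_DATA_SZ;
 *	uint64_t dropped;
 *	nvlist_t *event;
 *	int error;
 *
 *	error = zfs_zevent_next(ze, &event, &size, &dropped);
 *	if (error == ENOMEM) {
 *		(enlarge the destination buffer to 'size' bytes; the stream
 *		was not advanced, so the same event is returned on retry)
 *		error = zfs_zevent_next(ze, &event, &size, &dropped);
 *	}
 */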
622
623int
624zfs_zevent_wait(zfs_zevent_t *ze)
625{
626 int error = 0;
627
628 mutex_enter(&zevent_lock);
 629
630 if (zevent_flags & ZEVENT_SHUTDOWN) {
631 error = ESHUTDOWN;
632 goto out;
633 }
634
635 zevent_waiters++;
636 cv_wait_interruptible(&zevent_cv, &zevent_lock);
637 if (issig(JUSTLOOKING))
638 error = EINTR;
639
640 zevent_waiters--;
641out:
642 mutex_exit(&zevent_lock);
643
644 return error;
645}
646
 647void
 648zfs_zevent_init(zfs_zevent_t **zep)
 649{
 650 zfs_zevent_t *ze;
 651
652 ze = *zep = kmem_zalloc(sizeof (zfs_zevent_t), KM_SLEEP);
653 list_link_init(&ze->ze_node);
654}
 655
656void
657zfs_zevent_destroy(zfs_zevent_t *ze)
658{
659 mutex_enter(&zevent_lock);
660 if (ze->ze_zevent)
661 list_remove(&ze->ze_zevent->ev_ze_list, ze);
662 mutex_exit(&zevent_lock);
 663
 664 kmem_free(ze, sizeof (zfs_zevent_t));
 665}
 666#endif /* _KERNEL */
667
668/*
 669 * Wrappers for FM nvlist allocators
670 */
671/* ARGSUSED */
672static void *
673i_fm_alloc(nv_alloc_t *nva, size_t size)
674{
675 return (kmem_zalloc(size, KM_SLEEP));
676}
677
678/* ARGSUSED */
679static void
680i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
681{
682 kmem_free(buf, size);
683}
684
685const nv_alloc_ops_t fm_mem_alloc_ops = {
686 NULL,
687 NULL,
688 i_fm_alloc,
689 i_fm_free,
690 NULL
691};
692
693/*
694 * Create and initialize a new nv_alloc_t for a fixed buffer, buf. A pointer
695 * to the newly allocated nv_alloc_t structure is returned upon success or NULL
696 * is returned to indicate that the nv_alloc structure could not be created.
697 */
698nv_alloc_t *
699fm_nva_xcreate(char *buf, size_t bufsz)
700{
701 nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
702
703 if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
704 kmem_free(nvhdl, sizeof (nv_alloc_t));
705 return (NULL);
706 }
707
708 return (nvhdl);
709}
710
711/*
712 * Destroy a previously allocated nv_alloc structure. The fixed buffer
713 * associated with nva must be freed by the caller.
714 */
715void
716fm_nva_xdestroy(nv_alloc_t *nva)
717{
718 nv_alloc_fini(nva);
719 kmem_free(nva, sizeof (nv_alloc_t));
720}
721
722/*
723 * Create a new nv list. A pointer to a new nv list structure is returned
724 * upon success or NULL is returned to indicate that the structure could
725 * not be created. The newly created nv list is created and managed by the
726 * operations installed in nva. If nva is NULL, the default FMA nva
727 * operations are installed and used.
728 *
729 * When called from the kernel and nva == NULL, this function must be called
730 * from passive kernel context with no locks held that can prevent a
731 * sleeping memory allocation from occurring. Otherwise, this function may
 732 * be called from other kernel contexts as long as a valid nva created via
 733 * fm_nva_xcreate() is supplied.
734 */
735nvlist_t *
736fm_nvlist_create(nv_alloc_t *nva)
737{
738 int hdl_alloced = 0;
739 nvlist_t *nvl;
740 nv_alloc_t *nvhdl;
741
742 if (nva == NULL) {
743 nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
744
745 if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
746 kmem_free(nvhdl, sizeof (nv_alloc_t));
747 return (NULL);
748 }
749 hdl_alloced = 1;
750 } else {
751 nvhdl = nva;
752 }
753
754 if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
755 if (hdl_alloced) {
 756 nv_alloc_fini(nvhdl);
 757 kmem_free(nvhdl, sizeof (nv_alloc_t));
758 }
759 return (NULL);
760 }
761
762 return (nvl);
763}
764
765/*
766 * Destroy a previously allocated nvlist structure. flag indicates whether
767 * or not the associated nva structure should be freed (FM_NVA_FREE) or
768 * retained (FM_NVA_RETAIN). Retaining the nv alloc structure allows
769 * it to be re-used for future nvlist creation operations.
770 */
771void
772fm_nvlist_destroy(nvlist_t *nvl, int flag)
773{
774 nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);
775
776 nvlist_free(nvl);
777
778 if (nva != NULL) {
779 if (flag == FM_NVA_FREE)
780 fm_nva_xdestroy(nva);
781 }
782}
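/*
 * Illustrative sketch (hypothetical, constrained-context caller): pairing
 * fm_nva_xcreate() with the create/destroy routines above to build nvlists
 * out of a preallocated buffer instead of the kernel allocator:
 *
 *	static char ebuf[ERPT_DATA_SZ];
 *	nv_alloc_t *nva = fm_nva_xcreate(ebuf, sizeof (ebuf));
 *	nvlist_t *nvl = fm_nvlist_create(nva);
 *	...
 *	fm_nvlist_destroy(nvl, FM_NVA_RETAIN);	(retain nva for reuse)
 *	fm_nva_xdestroy(nva);			(ebuf stays caller-owned)
 */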
783
784int
785i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
786{
787 int nelem, ret = 0;
788 data_type_t type;
789
790 while (ret == 0 && name != NULL) {
791 type = va_arg(ap, data_type_t);
792 switch (type) {
793 case DATA_TYPE_BYTE:
794 ret = nvlist_add_byte(payload, name,
795 va_arg(ap, uint_t));
796 break;
797 case DATA_TYPE_BYTE_ARRAY:
798 nelem = va_arg(ap, int);
799 ret = nvlist_add_byte_array(payload, name,
800 va_arg(ap, uchar_t *), nelem);
801 break;
802 case DATA_TYPE_BOOLEAN_VALUE:
803 ret = nvlist_add_boolean_value(payload, name,
804 va_arg(ap, boolean_t));
805 break;
806 case DATA_TYPE_BOOLEAN_ARRAY:
807 nelem = va_arg(ap, int);
808 ret = nvlist_add_boolean_array(payload, name,
809 va_arg(ap, boolean_t *), nelem);
810 break;
811 case DATA_TYPE_INT8:
812 ret = nvlist_add_int8(payload, name,
813 va_arg(ap, int));
814 break;
815 case DATA_TYPE_INT8_ARRAY:
816 nelem = va_arg(ap, int);
817 ret = nvlist_add_int8_array(payload, name,
818 va_arg(ap, int8_t *), nelem);
819 break;
820 case DATA_TYPE_UINT8:
821 ret = nvlist_add_uint8(payload, name,
822 va_arg(ap, uint_t));
823 break;
824 case DATA_TYPE_UINT8_ARRAY:
825 nelem = va_arg(ap, int);
826 ret = nvlist_add_uint8_array(payload, name,
827 va_arg(ap, uint8_t *), nelem);
828 break;
829 case DATA_TYPE_INT16:
830 ret = nvlist_add_int16(payload, name,
831 va_arg(ap, int));
832 break;
833 case DATA_TYPE_INT16_ARRAY:
834 nelem = va_arg(ap, int);
835 ret = nvlist_add_int16_array(payload, name,
836 va_arg(ap, int16_t *), nelem);
837 break;
838 case DATA_TYPE_UINT16:
839 ret = nvlist_add_uint16(payload, name,
840 va_arg(ap, uint_t));
841 break;
842 case DATA_TYPE_UINT16_ARRAY:
843 nelem = va_arg(ap, int);
844 ret = nvlist_add_uint16_array(payload, name,
845 va_arg(ap, uint16_t *), nelem);
846 break;
847 case DATA_TYPE_INT32:
848 ret = nvlist_add_int32(payload, name,
849 va_arg(ap, int32_t));
850 break;
851 case DATA_TYPE_INT32_ARRAY:
852 nelem = va_arg(ap, int);
853 ret = nvlist_add_int32_array(payload, name,
854 va_arg(ap, int32_t *), nelem);
855 break;
856 case DATA_TYPE_UINT32:
857 ret = nvlist_add_uint32(payload, name,
858 va_arg(ap, uint32_t));
859 break;
860 case DATA_TYPE_UINT32_ARRAY:
861 nelem = va_arg(ap, int);
862 ret = nvlist_add_uint32_array(payload, name,
863 va_arg(ap, uint32_t *), nelem);
864 break;
865 case DATA_TYPE_INT64:
866 ret = nvlist_add_int64(payload, name,
867 va_arg(ap, int64_t));
868 break;
869 case DATA_TYPE_INT64_ARRAY:
870 nelem = va_arg(ap, int);
871 ret = nvlist_add_int64_array(payload, name,
872 va_arg(ap, int64_t *), nelem);
873 break;
874 case DATA_TYPE_UINT64:
875 ret = nvlist_add_uint64(payload, name,
876 va_arg(ap, uint64_t));
877 break;
878 case DATA_TYPE_UINT64_ARRAY:
879 nelem = va_arg(ap, int);
880 ret = nvlist_add_uint64_array(payload, name,
881 va_arg(ap, uint64_t *), nelem);
882 break;
883 case DATA_TYPE_STRING:
884 ret = nvlist_add_string(payload, name,
885 va_arg(ap, char *));
886 break;
887 case DATA_TYPE_STRING_ARRAY:
888 nelem = va_arg(ap, int);
889 ret = nvlist_add_string_array(payload, name,
890 va_arg(ap, char **), nelem);
891 break;
892 case DATA_TYPE_NVLIST:
893 ret = nvlist_add_nvlist(payload, name,
894 va_arg(ap, nvlist_t *));
895 break;
896 case DATA_TYPE_NVLIST_ARRAY:
897 nelem = va_arg(ap, int);
898 ret = nvlist_add_nvlist_array(payload, name,
899 va_arg(ap, nvlist_t **), nelem);
900 break;
901 default:
902 ret = EINVAL;
903 }
904
905 name = va_arg(ap, char *);
906 }
907 return (ret);
908}
909
910void
911fm_payload_set(nvlist_t *payload, ...)
912{
913 int ret;
914 const char *name;
915 va_list ap;
916
917 va_start(ap, payload);
918 name = va_arg(ap, char *);
919 ret = i_fm_payload_set(payload, name, ap);
920 va_end(ap);
921
922 if (ret)
923 atomic_add_64(
924 &erpt_kstat_data.payload_set_failed.value.ui64, 1);
925}
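/*
 * Illustrative call (member names are hypothetical): payload members are
 * supplied as NULL-terminated (name, DATA_TYPE_*, value) tuples, with array
 * types taking an element count before the pointer, exactly as decoded by
 * the switch in i_fm_payload_set() above:
 *
 *	fm_payload_set(ereport,
 *	    "example-err", DATA_TYPE_INT32, err,
 *	    "example-offset", DATA_TYPE_UINT64, offset,
 *	    "example-bytes", DATA_TYPE_UINT8_ARRAY, nbytes, bytes,
 *	    NULL);
 */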
926
927/*
928 * Set-up and validate the members of an ereport event according to:
929 *
930 * Member name Type Value
931 * ====================================================
932 * class string ereport
933 * version uint8_t 0
934 * ena uint64_t <ena>
935 * detector nvlist_t <detector>
936 * ereport-payload nvlist_t <var args>
937 *
938 * We don't actually add a 'version' member to the payload. Really,
939 * the version quoted to us by our caller is that of the category 1
940 * "ereport" event class (and we require FM_EREPORT_VERS0) but
941 * the payload version of the actual leaf class event under construction
942 * may be something else. Callers should supply a version in the varargs,
943 * or (better) we could take two version arguments - one for the
944 * ereport category 1 classification (expect FM_EREPORT_VERS0) and one
945 * for the leaf class.
946 */
947void
948fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
949 uint64_t ena, const nvlist_t *detector, ...)
950{
951 char ereport_class[FM_MAX_CLASS];
952 const char *name;
953 va_list ap;
954 int ret;
955
956 if (version != FM_EREPORT_VERS0) {
957 atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
958 return;
959 }
960
961 (void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
962 FM_EREPORT_CLASS, erpt_class);
963 if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
964 atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
965 return;
966 }
967
968 if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
969 atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
970 }
971
972 if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
973 (nvlist_t *)detector) != 0) {
974 atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
975 }
976
977 va_start(ap, detector);
978 name = va_arg(ap, const char *);
979 ret = i_fm_payload_set(ereport, name, ap);
980 va_end(ap);
981
982 if (ret)
983 atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
984}
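/*
 * Illustrative call (subclass and payload are hypothetical): the FM_CLASS
 * member stored by fm_ereport_set() becomes "ereport.<erpt_class>", and the
 * trailing varargs use the same NULL-terminated tuple convention as
 * fm_payload_set():
 *
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "io.example.bus", ena,
 *	    detector,
 *	    "device-path", DATA_TYPE_STRING, path,
 *	    NULL);
 */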
985
986/*
 987 * Set-up and validate the members of an hc fmri according to:
988 *
989 * Member name Type Value
990 * ===================================================
991 * version uint8_t 0
992 * auth nvlist_t <auth>
993 * hc-name string <name>
994 * hc-id string <id>
995 *
996 * Note that auth and hc-id are optional members.
997 */
998
999#define HC_MAXPAIRS 20
1000#define HC_MAXNAMELEN 50
1001
1002static int
1003fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
1004{
1005 if (version != FM_HC_SCHEME_VERSION) {
1006 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1007 return (0);
1008 }
1009
1010 if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
1011 nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
1012 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1013 return (0);
1014 }
1015
1016 if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
1017 (nvlist_t *)auth) != 0) {
1018 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1019 return (0);
1020 }
1021
1022 return (1);
1023}
1024
1025void
1026fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
1027 nvlist_t *snvl, int npairs, ...)
1028{
1029 nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
1030 nvlist_t *pairs[HC_MAXPAIRS];
1031 va_list ap;
1032 int i;
1033
1034 if (!fm_fmri_hc_set_common(fmri, version, auth))
1035 return;
1036
1037 npairs = MIN(npairs, HC_MAXPAIRS);
1038
1039 va_start(ap, npairs);
1040 for (i = 0; i < npairs; i++) {
1041 const char *name = va_arg(ap, const char *);
1042 uint32_t id = va_arg(ap, uint32_t);
1043 char idstr[11];
1044
1045 (void) snprintf(idstr, sizeof (idstr), "%u", id);
1046
1047 pairs[i] = fm_nvlist_create(nva);
1048 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
1049 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
1050 atomic_add_64(
1051 &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1052 }
1053 }
1054 va_end(ap);
1055
1056 if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
1057 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1058
1059 for (i = 0; i < npairs; i++)
1060 fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
1061
1062 if (snvl != NULL) {
1063 if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
1064 atomic_add_64(
1065 &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1066 }
1067 }
1068}
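/*
 * Illustrative call (topology names are hypothetical): each of the 'npairs'
 * varargs is a (name, uint32_t id) pair, capped at HC_MAXPAIRS, which becomes
 * the hc-list described above:
 *
 *	fm_fmri_hc_set(fmri, FM_HC_SCHEME_VERSION, auth, NULL, 3,
 *	    "motherboard", 0,
 *	    "chip", 1,
 *	    "cpu", 4);
 */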
1069
1070void
1071fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
1072 nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
1073{
1074 nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
1075 nvlist_t *pairs[HC_MAXPAIRS];
1076 nvlist_t **hcl;
1077 uint_t n;
1078 int i, j;
1079 va_list ap;
1080 char *hcname, *hcid;
1081
1082 if (!fm_fmri_hc_set_common(fmri, version, auth))
1083 return;
1084
1085 /*
1086 * copy the bboard nvpairs to the pairs array
1087 */
1088 if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
1089 != 0) {
1090 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1091 return;
1092 }
1093
1094 for (i = 0; i < n; i++) {
1095 if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
1096 &hcname) != 0) {
1097 atomic_add_64(
1098 &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1099 return;
1100 }
1101 if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
1102 atomic_add_64(
1103 &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1104 return;
1105 }
1106
1107 pairs[i] = fm_nvlist_create(nva);
1108 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
1109 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
1110 for (j = 0; j <= i; j++) {
1111 if (pairs[j] != NULL)
1112 fm_nvlist_destroy(pairs[j],
1113 FM_NVA_RETAIN);
1114 }
1115 atomic_add_64(
1116 &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1117 return;
1118 }
1119 }
1120
1121 /*
1122 * create the pairs from passed in pairs
1123 */
1124 npairs = MIN(npairs, HC_MAXPAIRS);
1125
1126 va_start(ap, npairs);
1127 for (i = n; i < npairs + n; i++) {
1128 const char *name = va_arg(ap, const char *);
1129 uint32_t id = va_arg(ap, uint32_t);
1130 char idstr[11];
1131 (void) snprintf(idstr, sizeof (idstr), "%u", id);
1132 pairs[i] = fm_nvlist_create(nva);
1133 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
1134 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
1135 for (j = 0; j <= i; j++) {
1136 if (pairs[j] != NULL)
1137 fm_nvlist_destroy(pairs[j],
1138 FM_NVA_RETAIN);
1139 }
1140 atomic_add_64(
1141 &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1142 return;
1143 }
1144 }
1145 va_end(ap);
1146
1147 /*
1148 * Create the fmri hc list
1149 */
1150 if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs,
1151 npairs + n) != 0) {
1152 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1153 return;
1154 }
1155
1156 for (i = 0; i < npairs + n; i++) {
1157 fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
1158 }
1159
1160 if (snvl != NULL) {
1161 if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
1162 atomic_add_64(
1163 &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1164 return;
1165 }
1166 }
1167}
1168
1169/*
 1170 * Set-up and validate the members of a dev fmri according to:
1171 *
1172 * Member name Type Value
1173 * ====================================================
1174 * version uint8_t 0
1175 * auth nvlist_t <auth>
1176 * devpath string <devpath>
1177 * [devid] string <devid>
1178 * [target-port-l0id] string <target-port-lun0-id>
1179 *
1180 * Note that auth and devid are optional members.
1181 */
1182void
1183fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
 1184 const char *devpath, const char *devid, const char *tpl0)
 1185{
1186 int err = 0;
1187
1188 if (version != DEV_SCHEME_VERSION0) {
1189 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1190 return;
1191 }
1192
1193 err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
1194 err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);
1195
1196 if (auth != NULL) {
1197 err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
1198 (nvlist_t *)auth);
1199 }
1200
 1201 err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);
1202
1203 if (devid != NULL)
1204 err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);
1205
1206 if (tpl0 != NULL)
1207 err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);
1208
1209 if (err)
1210 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1211
1212}
1213
1214/*
 1215 * Set-up and validate the members of a cpu fmri according to:
1216 *
1217 * Member name Type Value
1218 * ====================================================
1219 * version uint8_t 0
1220 * auth nvlist_t <auth>
1221 * cpuid uint32_t <cpu_id>
1222 * cpumask uint8_t <cpu_mask>
1223 * serial uint64_t <serial_id>
1224 *
1225 * Note that auth, cpumask, serial are optional members.
1226 *
1227 */
1228void
1229fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
1230 uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
1231{
1232 uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;
1233
1234 if (version < CPU_SCHEME_VERSION1) {
1235 atomic_add_64(failedp, 1);
1236 return;
1237 }
1238
1239 if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
1240 atomic_add_64(failedp, 1);
1241 return;
1242 }
1243
1244 if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
1245 FM_FMRI_SCHEME_CPU) != 0) {
1246 atomic_add_64(failedp, 1);
1247 return;
1248 }
1249
1250 if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
1251 (nvlist_t *)auth) != 0)
1252 atomic_add_64(failedp, 1);
1253
1254 if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
1255 atomic_add_64(failedp, 1);
1256
1257 if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
1258 *cpu_maskp) != 0)
1259 atomic_add_64(failedp, 1);
1260
1261 if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
1262 FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
1263 atomic_add_64(failedp, 1);
1264}
1265
1266/*
1267 * Set-up and validate the members of a mem according to:
1268 *
1269 * Member name Type Value
1270 * ====================================================
1271 * version uint8_t 0
1272 * auth nvlist_t <auth> [optional]
1273 * unum string <unum>
1274 * serial string <serial> [optional*]
1275 * offset uint64_t <offset> [optional]
1276 *
1277 * * serial is required if offset is present
1278 */
1279void
1280fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
1281 const char *unum, const char *serial, uint64_t offset)
1282{
1283 if (version != MEM_SCHEME_VERSION0) {
1284 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1285 return;
1286 }
1287
1288 if (!serial && (offset != (uint64_t)-1)) {
1289 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1290 return;
1291 }
1292
1293 if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
1294 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1295 return;
1296 }
1297
1298 if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
1299 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1300 return;
1301 }
1302
1303 if (auth != NULL) {
1304 if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
1305 (nvlist_t *)auth) != 0) {
1306 atomic_add_64(
1307 &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1308 }
1309 }
1310
1311 if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
1312 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1313 }
1314
1315 if (serial != NULL) {
1316 if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
1317 (char **)&serial, 1) != 0) {
1318 atomic_add_64(
1319 &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1320 }
1321 if (offset != (uint64_t)-1) {
1322 if (nvlist_add_uint64(fmri, FM_FMRI_MEM_OFFSET,
1323 offset) != 0) {
1324 atomic_add_64(&erpt_kstat_data.
1325 fmri_set_failed.value.ui64, 1);
1326 }
1327 }
1328 }
1329}
1330
1331void
1332fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
1333 uint64_t vdev_guid)
1334{
1335 if (version != ZFS_SCHEME_VERSION0) {
1336 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1337 return;
1338 }
1339
1340 if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
1341 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1342 return;
1343 }
1344
1345 if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
1346 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1347 return;
1348 }
1349
1350 if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
1351 atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1352 }
1353
1354 if (vdev_guid != 0) {
1355 if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
1356 atomic_add_64(
1357 &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1358 }
1359 }
1360}
1361
1362uint64_t
1363fm_ena_increment(uint64_t ena)
1364{
1365 uint64_t new_ena;
1366
1367 switch (ENA_FORMAT(ena)) {
1368 case FM_ENA_FMT1:
1369 new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
1370 break;
1371 case FM_ENA_FMT2:
1372 new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
1373 break;
1374 default:
1375 new_ena = 0;
1376 }
1377
1378 return (new_ena);
1379}
1380
1381uint64_t
1382fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
1383{
1384 uint64_t ena = 0;
1385
1386 switch (format) {
1387 case FM_ENA_FMT1:
1388 if (timestamp) {
1389 ena = (uint64_t)((format & ENA_FORMAT_MASK) |
1390 ((cpuid << ENA_FMT1_CPUID_SHFT) &
1391 ENA_FMT1_CPUID_MASK) |
1392 ((timestamp << ENA_FMT1_TIME_SHFT) &
1393 ENA_FMT1_TIME_MASK));
1394 } else {
1395 ena = (uint64_t)((format & ENA_FORMAT_MASK) |
1396 ((cpuid << ENA_FMT1_CPUID_SHFT) &
1397 ENA_FMT1_CPUID_MASK) |
 1398 ((gethrtime() << ENA_FMT1_TIME_SHFT) &
1399 ENA_FMT1_TIME_MASK));
1400 }
1401 break;
1402 case FM_ENA_FMT2:
1403 ena = (uint64_t)((format & ENA_FORMAT_MASK) |
1404 ((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
1405 break;
1406 default:
1407 break;
1408 }
1409
1410 return (ena);
1411}
1412
1413uint64_t
1414fm_ena_generate(uint64_t timestamp, uchar_t format)
1415{
 1416 return (fm_ena_generate_cpu(timestamp, getcpuid(), format));
1417}
1418
1419uint64_t
1420fm_ena_generation_get(uint64_t ena)
1421{
1422 uint64_t gen;
1423
1424 switch (ENA_FORMAT(ena)) {
1425 case FM_ENA_FMT1:
1426 gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
1427 break;
1428 case FM_ENA_FMT2:
1429 gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
1430 break;
1431 default:
1432 gen = 0;
1433 break;
1434 }
1435
1436 return (gen);
1437}
1438
1439uchar_t
1440fm_ena_format_get(uint64_t ena)
1441{
1442
1443 return (ENA_FORMAT(ena));
1444}
1445
1446uint64_t
1447fm_ena_id_get(uint64_t ena)
1448{
1449 uint64_t id;
1450
1451 switch (ENA_FORMAT(ena)) {
1452 case FM_ENA_FMT1:
1453 id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
1454 break;
1455 case FM_ENA_FMT2:
1456 id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
1457 break;
1458 default:
1459 id = 0;
1460 }
1461
1462 return (id);
1463}
1464
1465uint64_t
1466fm_ena_time_get(uint64_t ena)
1467{
1468 uint64_t time;
1469
1470 switch (ENA_FORMAT(ena)) {
1471 case FM_ENA_FMT1:
1472 time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
1473 break;
1474 case FM_ENA_FMT2:
1475 time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
1476 break;
1477 default:
1478 time = 0;
1479 }
1480
1481 return (time);
1482}
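/*
 * Illustrative round trip (values are hypothetical): an ENA produced by
 * fm_ena_generate_cpu() decomposes back into its fields with the accessors
 * above:
 *
 *	uint64_t ena = fm_ena_generate_cpu(gethrtime(), cpuid, FM_ENA_FMT1);
 *
 *	fm_ena_format_get(ena)		returns FM_ENA_FMT1
 *	fm_ena_id_get(ena)		returns the format-specific id field
 *	fm_ena_time_get(ena)		returns the embedded timestamp bits
 *	fm_ena_generation_get(ena)	starts at 0 and is bumped by
 *					fm_ena_increment()
 */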
1483
 1484#ifdef _KERNEL
 1485void
 1486fm_init(void)
 1487{
1488 zevent_len_cur = 0;
1489 zevent_flags = 0;
 1490
1491 if (zfs_zevent_len_max == 0)
1492 zfs_zevent_len_max = ERPT_MAX_ERRS * MAX(max_ncpus, 4);
 1493
1494 /* Initialize zevent allocation and generation kstats */
1495 fm_ksp = kstat_create("zfs", 0, "fm", "misc", KSTAT_TYPE_NAMED,
1496 sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
1497 KSTAT_FLAG_VIRTUAL);
1498
1499 if (fm_ksp != NULL) {
1500 fm_ksp->ks_data = &erpt_kstat_data;
1501 kstat_install(fm_ksp);
1502 } else {
1503 cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
1504 }
1505
1506 mutex_init(&zevent_lock, NULL, MUTEX_DEFAULT, NULL);
1507 list_create(&zevent_list, sizeof(zevent_t), offsetof(zevent_t, ev_node));
1508 cv_init(&zevent_cv, NULL, CV_DEFAULT, NULL);
 1509}
1510
1511void
 1512fm_fini(void)
 1513{
 1514 int count;
 1515
1516 zfs_zevent_drain_all(&count);
1517 cv_broadcast(&zevent_cv);
 1518
1519 mutex_enter(&zevent_lock);
1520 zevent_flags |= ZEVENT_SHUTDOWN;
1521 while (zevent_waiters > 0) {
1522 mutex_exit(&zevent_lock);
1523 schedule();
1524 mutex_enter(&zevent_lock);
 1525 }
 1526 mutex_exit(&zevent_lock);
 1527
1528 cv_destroy(&zevent_cv);
1529 list_destroy(&zevent_list);
1530 mutex_destroy(&zevent_lock);
 1531
1532 if (fm_ksp != NULL) {
1533 kstat_delete(fm_ksp);
1534 fm_ksp = NULL;
 1535 }
 1536}
 1537
1538module_param(zfs_zevent_len_max, int, 0644);
1539MODULE_PARM_DESC(zfs_zevent_len_max, "Max event queue length");
 1540
1541module_param(zfs_zevent_cols, int, 0644);
1542MODULE_PARM_DESC(zfs_zevent_cols, "Max event column width");
 1543
1544module_param(zfs_zevent_console, int, 0644);
1545MODULE_PARM_DESC(zfs_zevent_console, "Log events to the console");
 1546
 1547#endif /* _KERNEL */