/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Fault Management Architecture (FMA) Resource and Protocol Support
 *
 * The routines contained herein provide services to support kernel subsystems
 * in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
 *
 * Name-Value Pair Lists
 *
 * The embodiment of an FMA protocol element (event, fmri or authority) is a
 * name-value pair list (nvlist_t). FMA-specific nvlist constructor and
 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
 * to create an nvpair list using custom allocators. Callers may choose to
 * allocate either from the kernel memory allocator, or from a preallocated
 * buffer, useful in constrained contexts like high-level interrupt routines.
 *
 * Protocol Event and FMRI Construction
 *
 * Convenience routines are provided to construct nvlist events according to
 * the FMA Event Protocol and Naming Schema specification for ereports and
 * FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
 *
 * ENA Manipulation
 *
 * Routines to generate ENA formats 0, 1 and 2 are available as well as
 * routines to increment formats 1 and 2. Individual fields within the
 * ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
 * fm_ena_format_get() and fm_ena_generation_get().
 */
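
/*
 * Example (an illustrative sketch, not compiled here): a typical caller
 * builds a detector and an ereport, fills in the payload, and posts the
 * result as a zevent. The class name, payload member and callback below
 * are hypothetical.
 *
 *	nvlist_t *detector = fm_nvlist_create(NULL);
 *	nvlist_t *ereport = fm_nvlist_create(NULL);
 *
 *	fm_fmri_zfs_set(detector, ZFS_SCHEME_VERSION0, pool_guid, vdev_guid);
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "my.ereport.class",
 *	    fm_ena_generate(0, FM_ENA_FMT1), detector, NULL);
 *	fm_payload_set(ereport, "my_size", DATA_TYPE_UINT64, size, NULL);
 *	(void) zfs_zevent_post(ereport, detector, my_zevent_cb);
 */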

#include <sys/types.h>
#include <sys/time.h>
#include <sys/list.h>
#include <sys/nvpair.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/systeminfo.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/kstat.h>
#include <sys/zfs_context.h>
#ifdef _KERNEL
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/zfs_ioctl.h>

static uint_t zfs_zevent_len_max = 512;

static uint_t zevent_len_cur = 0;
static int zevent_waiters = 0;
static int zevent_flags = 0;

/* Num events rate limited since the last time zfs_zevent_next() was called */
static uint64_t ratelimit_dropped = 0;

/*
 * The EID (Event IDentifier) is used to uniquely tag a zevent when it is
 * posted. The posted EIDs are monotonically increasing but not persistent.
 * They will be reset to the initial value (1) each time the kernel module is
 * loaded.
 */
static uint64_t zevent_eid = 0;

static kmutex_t zevent_lock;
static list_t zevent_list;
static kcondvar_t zevent_cv;
#endif /* _KERNEL */


/*
 * Common fault management kstats to record event generation failures
 */

struct erpt_kstat {
	kstat_named_t	erpt_dropped;		/* num erpts dropped on post */
	kstat_named_t	erpt_set_failed;	/* num erpt set failures */
	kstat_named_t	fmri_set_failed;	/* num fmri set failures */
	kstat_named_t	payload_set_failed;	/* num payload set failures */
	kstat_named_t	erpt_duplicates;	/* num duplicate erpts */
};

static struct erpt_kstat erpt_kstat_data = {
	{ "erpt-dropped", KSTAT_DATA_UINT64 },
	{ "erpt-set-failed", KSTAT_DATA_UINT64 },
	{ "fmri-set-failed", KSTAT_DATA_UINT64 },
	{ "payload-set-failed", KSTAT_DATA_UINT64 },
	{ "erpt-duplicates", KSTAT_DATA_UINT64 }
};

kstat_t *fm_ksp;

#ifdef _KERNEL

static zevent_t *
zfs_zevent_alloc(void)
{
	zevent_t *ev;

	ev = kmem_zalloc(sizeof (zevent_t), KM_SLEEP);

	list_create(&ev->ev_ze_list, sizeof (zfs_zevent_t),
	    offsetof(zfs_zevent_t, ze_node));
	list_link_init(&ev->ev_node);

	return (ev);
}

static void
zfs_zevent_free(zevent_t *ev)
{
	/* Run provided cleanup callback */
	ev->ev_cb(ev->ev_nvl, ev->ev_detector);

	list_destroy(&ev->ev_ze_list);
	kmem_free(ev, sizeof (zevent_t));
}

static void
zfs_zevent_drain(zevent_t *ev)
{
	zfs_zevent_t *ze;

	ASSERT(MUTEX_HELD(&zevent_lock));
	list_remove(&zevent_list, ev);

	/* Remove references to this event in all private file data */
	while ((ze = list_remove_head(&ev->ev_ze_list)) != NULL) {
		ze->ze_zevent = NULL;
		ze->ze_dropped++;
	}

	zfs_zevent_free(ev);
}

void
zfs_zevent_drain_all(uint_t *count)
{
	zevent_t *ev;

	mutex_enter(&zevent_lock);
	while ((ev = list_head(&zevent_list)) != NULL)
		zfs_zevent_drain(ev);

	*count = zevent_len_cur;
	zevent_len_cur = 0;
	mutex_exit(&zevent_lock);
}

/*
 * New zevents are inserted at the head. If the maximum queue
 * length is exceeded, a zevent will be drained from the tail.
 * As part of this, any user space processes which currently have
 * a reference to this zevent_t in their private data will have
 * that reference set to NULL.
 */
static void
zfs_zevent_insert(zevent_t *ev)
{
	ASSERT(MUTEX_HELD(&zevent_lock));
	list_insert_head(&zevent_list, ev);

	if (zevent_len_cur >= zfs_zevent_len_max)
		zfs_zevent_drain(list_tail(&zevent_list));
	else
		zevent_len_cur++;
}

/*
 * Post a zevent. The cb will be called when nvl and detector are no longer
 * needed, i.e.:
 * - An error happened and a zevent can't be posted. In this case, cb is
 *   called before zfs_zevent_post() returns.
 * - The event is being drained and freed.
 */
int
zfs_zevent_post(nvlist_t *nvl, nvlist_t *detector, zevent_cb_t *cb)
{
	inode_timespec_t tv;
	int64_t tv_array[2];
	uint64_t eid;
	size_t nvl_size = 0;
	zevent_t *ev;
	int error;

	ASSERT(cb != NULL);

	gethrestime(&tv);
	tv_array[0] = tv.tv_sec;
	tv_array[1] = tv.tv_nsec;

	error = nvlist_add_int64_array(nvl, FM_EREPORT_TIME, tv_array, 2);
	if (error) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		goto out;
	}

	eid = atomic_inc_64_nv(&zevent_eid);
	error = nvlist_add_uint64(nvl, FM_EREPORT_EID, eid);
	if (error) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		goto out;
	}

	error = nvlist_size(nvl, &nvl_size, NV_ENCODE_NATIVE);
	if (error) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		goto out;
	}

	if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		error = EOVERFLOW;
		goto out;
	}

	ev = zfs_zevent_alloc();
	if (ev == NULL) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		error = ENOMEM;
		goto out;
	}

	ev->ev_nvl = nvl;
	ev->ev_detector = detector;
	ev->ev_cb = cb;
	ev->ev_eid = eid;

	mutex_enter(&zevent_lock);
	zfs_zevent_insert(ev);
	cv_broadcast(&zevent_cv);
	mutex_exit(&zevent_lock);

out:
	if (error)
		cb(nvl, detector);

	return (error);
}
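
/*
 * A minimal sketch of a zevent_cb_t matching the contract above (the
 * function name is hypothetical): it releases both nvlists exactly once,
 * whether the post failed immediately or the event was drained later.
 *
 *	static void
 *	my_zevent_cb(nvlist_t *nvl, nvlist_t *detector)
 *	{
 *		fm_nvlist_destroy(nvl, FM_NVA_FREE);
 *		if (detector != NULL)
 *			fm_nvlist_destroy(detector, FM_NVA_FREE);
 *	}
 */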

void
zfs_zevent_track_duplicate(void)
{
	atomic_inc_64(&erpt_kstat_data.erpt_duplicates.value.ui64);
}

static int
zfs_zevent_minor_to_state(minor_t minor, zfs_zevent_t **ze)
{
	*ze = zfsdev_get_state(minor, ZST_ZEVENT);
	if (*ze == NULL)
		return (SET_ERROR(EBADF));

	return (0);
}

zfs_file_t *
zfs_zevent_fd_hold(int fd, minor_t *minorp, zfs_zevent_t **ze)
{
	zfs_file_t *fp = zfs_file_get(fd);
	if (fp == NULL)
		return (NULL);

	int error = zfsdev_getminor(fp, minorp);
	if (error == 0)
		error = zfs_zevent_minor_to_state(*minorp, ze);

	if (error) {
		zfs_zevent_fd_rele(fp);
		fp = NULL;
	}

	return (fp);
}

void
zfs_zevent_fd_rele(zfs_file_t *fp)
{
	zfs_file_put(fp);
}

/*
 * Get the next zevent in the stream and place a copy in 'event'. This
 * may fail with ENOMEM if the encoded nvlist size exceeds the passed
 * 'event_size'. In this case the stream pointer is not advanced and
 * 'event_size' is set to the minimum required buffer size.
 */
int
zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size,
    uint64_t *dropped)
{
	zevent_t *ev;
	size_t size;
	int error = 0;

	mutex_enter(&zevent_lock);
	if (ze->ze_zevent == NULL) {
		/* A new stream starts at the beginning/tail */
		ev = list_tail(&zevent_list);
		if (ev == NULL) {
			error = ENOENT;
			goto out;
		}
	} else {
		/*
		 * An existing stream continues with the next element and
		 * removes itself from the wait queue for the previous element
		 */
		ev = list_prev(&zevent_list, ze->ze_zevent);
		if (ev == NULL) {
			error = ENOENT;
			goto out;
		}
	}

	VERIFY(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE) == 0);
	if (size > *event_size) {
		*event_size = size;
		error = ENOMEM;
		goto out;
	}

	if (ze->ze_zevent)
		list_remove(&ze->ze_zevent->ev_ze_list, ze);

	ze->ze_zevent = ev;
	list_insert_head(&ev->ev_ze_list, ze);
	(void) nvlist_dup(ev->ev_nvl, event, KM_SLEEP);
	*dropped = ze->ze_dropped;

#ifdef _KERNEL
	/* Include events dropped due to rate limiting */
	*dropped += atomic_swap_64(&ratelimit_dropped, 0);
#endif
	ze->ze_dropped = 0;
out:
	mutex_exit(&zevent_lock);

	return (error);
}
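
/*
 * Consumer sketch (illustrative only): on ENOMEM the caller is expected
 * to retry with the updated 'event_size', since the stream cursor was not
 * advanced; the duplicated nvlist must be freed once consumed.
 *
 *	uint64_t size = 1024, dropped;
 *	nvlist_t *event;
 *
 *	int error = zfs_zevent_next(ze, &event, &size, &dropped);
 *	if (error == ENOMEM)
 *		error = zfs_zevent_next(ze, &event, &size, &dropped);
 *	if (error == 0)
 *		nvlist_free(event);
 */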

/*
 * Wait in an interruptible state for any new events.
 */
int
zfs_zevent_wait(zfs_zevent_t *ze)
{
	int error = EAGAIN;

	mutex_enter(&zevent_lock);
	zevent_waiters++;

	while (error == EAGAIN) {
		if (zevent_flags & ZEVENT_SHUTDOWN) {
			error = SET_ERROR(ESHUTDOWN);
			break;
		}

		if (cv_wait_sig(&zevent_cv, &zevent_lock) == 0) {
			error = SET_ERROR(EINTR);
			break;
		} else if (!list_is_empty(&zevent_list)) {
			error = 0;
			continue;
		} else {
			error = EAGAIN;
		}
	}

	zevent_waiters--;
	mutex_exit(&zevent_lock);

	return (error);
}

/*
 * The caller may seek to a specific EID by passing that EID. If the EID
 * is still available in the posted list of events the cursor is positioned
 * there. Otherwise ENOENT is returned and the cursor is not moved.
 *
 * There are two reserved EIDs which may be passed and will never fail.
 * ZEVENT_SEEK_START positions the cursor at the start of the list, and
 * ZEVENT_SEEK_END positions the cursor at the end of the list.
 */
int
zfs_zevent_seek(zfs_zevent_t *ze, uint64_t eid)
{
	zevent_t *ev;
	int error = 0;

	mutex_enter(&zevent_lock);

	if (eid == ZEVENT_SEEK_START) {
		if (ze->ze_zevent)
			list_remove(&ze->ze_zevent->ev_ze_list, ze);

		ze->ze_zevent = NULL;
		goto out;
	}

	if (eid == ZEVENT_SEEK_END) {
		if (ze->ze_zevent)
			list_remove(&ze->ze_zevent->ev_ze_list, ze);

		ev = list_head(&zevent_list);
		if (ev) {
			ze->ze_zevent = ev;
			list_insert_head(&ev->ev_ze_list, ze);
		} else {
			ze->ze_zevent = NULL;
		}

		goto out;
	}

	for (ev = list_tail(&zevent_list); ev != NULL;
	    ev = list_prev(&zevent_list, ev)) {
		if (ev->ev_eid == eid) {
			if (ze->ze_zevent)
				list_remove(&ze->ze_zevent->ev_ze_list, ze);

			ze->ze_zevent = ev;
			list_insert_head(&ev->ev_ze_list, ze);
			break;
		}
	}

	if (ev == NULL)
		error = ENOENT;

out:
	mutex_exit(&zevent_lock);

	return (error);
}
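
/*
 * For example (sketch), a consumer interested only in events posted after
 * it attached can seek to the end of the list before waiting:
 *
 *	(void) zfs_zevent_seek(ze, ZEVENT_SEEK_END);
 *	while (zfs_zevent_wait(ze) == 0) {
 *		... drain new events with zfs_zevent_next() ...
 *	}
 */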

void
zfs_zevent_init(zfs_zevent_t **zep)
{
	zfs_zevent_t *ze;

	ze = *zep = kmem_zalloc(sizeof (zfs_zevent_t), KM_SLEEP);
	list_link_init(&ze->ze_node);
}

void
zfs_zevent_destroy(zfs_zevent_t *ze)
{
	mutex_enter(&zevent_lock);
	if (ze->ze_zevent)
		list_remove(&ze->ze_zevent->ev_ze_list, ze);
	mutex_exit(&zevent_lock);

	kmem_free(ze, sizeof (zfs_zevent_t));
}
#endif /* _KERNEL */

/*
 * Wrappers for FM nvlist allocators
 */
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
	(void) nva;
	return (kmem_alloc(size, KM_SLEEP));
}

static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
	(void) nva;
	kmem_free(buf, size);
}

static const nv_alloc_ops_t fm_mem_alloc_ops = {
	.nv_ao_init = NULL,
	.nv_ao_fini = NULL,
	.nv_ao_alloc = i_fm_alloc,
	.nv_ao_free = i_fm_free,
	.nv_ao_reset = NULL
};

/*
 * Create and initialize a new nv_alloc_t for a fixed buffer, buf. A pointer
 * to the newly allocated nv_alloc_t structure is returned upon success or NULL
 * is returned to indicate that the nv_alloc structure could not be created.
 */
nv_alloc_t *
fm_nva_xcreate(char *buf, size_t bufsz)
{
	nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

	if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
		kmem_free(nvhdl, sizeof (nv_alloc_t));
		return (NULL);
	}

	return (nvhdl);
}

/*
 * Destroy a previously allocated nv_alloc structure. The fixed buffer
 * associated with nva must be freed by the caller.
 */
void
fm_nva_xdestroy(nv_alloc_t *nva)
{
	nv_alloc_fini(nva);
	kmem_free(nva, sizeof (nv_alloc_t));
}
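
/*
 * Usage sketch (illustrative): back an nvlist with a preallocated buffer,
 * useful where allocating from the kernel memory allocator is undesirable.
 * Destroying the nvlist with FM_NVA_RETAIN keeps the nv_alloc_t for reuse.
 *
 *	static char ebuf[ERPT_DATA_SZ];
 *	nv_alloc_t *nva = fm_nva_xcreate(ebuf, sizeof (ebuf));
 *	nvlist_t *nvl = fm_nvlist_create(nva);
 *	...
 *	fm_nvlist_destroy(nvl, FM_NVA_RETAIN);
 *	fm_nva_xdestroy(nva);
 */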

/*
 * Create a new nv list. A pointer to a new nv list structure is returned
 * upon success or NULL is returned to indicate that the structure could
 * not be created. The newly created nv list is created and managed by the
 * operations installed in nva. If nva is NULL, the default FMA nva
 * operations are installed and used.
 *
 * When called from the kernel and nva == NULL, this function must be called
 * from passive kernel context with no locks held that can prevent a
 * sleeping memory allocation from occurring. Otherwise, this function may
 * be called from other kernel contexts as long as a valid nva created via
 * fm_nva_xcreate() is supplied.
 */
nvlist_t *
fm_nvlist_create(nv_alloc_t *nva)
{
	int hdl_alloced = 0;
	nvlist_t *nvl;
	nv_alloc_t *nvhdl;

	if (nva == NULL) {
		nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

		if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
			kmem_free(nvhdl, sizeof (nv_alloc_t));
			return (NULL);
		}
		hdl_alloced = 1;
	} else {
		nvhdl = nva;
	}

	if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
		if (hdl_alloced) {
			nv_alloc_fini(nvhdl);
			kmem_free(nvhdl, sizeof (nv_alloc_t));
		}
		return (NULL);
	}

	return (nvl);
}

/*
 * Destroy a previously allocated nvlist structure. flag indicates whether
 * or not the associated nva structure should be freed (FM_NVA_FREE) or
 * retained (FM_NVA_RETAIN). Retaining the nv alloc structure allows
 * it to be re-used for future nvlist creation operations.
 */
void
fm_nvlist_destroy(nvlist_t *nvl, int flag)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);

	nvlist_free(nvl);

	if (nva != NULL) {
		if (flag == FM_NVA_FREE)
			fm_nva_xdestroy(nva);
	}
}

int
i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
{
	int nelem, ret = 0;
	data_type_t type;

	while (ret == 0 && name != NULL) {
		type = va_arg(ap, data_type_t);
		switch (type) {
		case DATA_TYPE_BYTE:
			ret = nvlist_add_byte(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_BYTE_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_byte_array(payload, name,
			    va_arg(ap, uchar_t *), nelem);
			break;
		case DATA_TYPE_BOOLEAN_VALUE:
			ret = nvlist_add_boolean_value(payload, name,
			    va_arg(ap, boolean_t));
			break;
		case DATA_TYPE_BOOLEAN_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_boolean_array(payload, name,
			    va_arg(ap, boolean_t *), nelem);
			break;
		case DATA_TYPE_INT8:
			ret = nvlist_add_int8(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int8_array(payload, name,
			    va_arg(ap, int8_t *), nelem);
			break;
		case DATA_TYPE_UINT8:
			ret = nvlist_add_uint8(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint8_array(payload, name,
			    va_arg(ap, uint8_t *), nelem);
			break;
		case DATA_TYPE_INT16:
			ret = nvlist_add_int16(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int16_array(payload, name,
			    va_arg(ap, int16_t *), nelem);
			break;
		case DATA_TYPE_UINT16:
			ret = nvlist_add_uint16(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint16_array(payload, name,
			    va_arg(ap, uint16_t *), nelem);
			break;
		case DATA_TYPE_INT32:
			ret = nvlist_add_int32(payload, name,
			    va_arg(ap, int32_t));
			break;
		case DATA_TYPE_INT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int32_array(payload, name,
			    va_arg(ap, int32_t *), nelem);
			break;
		case DATA_TYPE_UINT32:
			ret = nvlist_add_uint32(payload, name,
			    va_arg(ap, uint32_t));
			break;
		case DATA_TYPE_UINT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint32_array(payload, name,
			    va_arg(ap, uint32_t *), nelem);
			break;
		case DATA_TYPE_INT64:
			ret = nvlist_add_int64(payload, name,
			    va_arg(ap, int64_t));
			break;
		case DATA_TYPE_INT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int64_array(payload, name,
			    va_arg(ap, int64_t *), nelem);
			break;
		case DATA_TYPE_UINT64:
			ret = nvlist_add_uint64(payload, name,
			    va_arg(ap, uint64_t));
			break;
		case DATA_TYPE_UINT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint64_array(payload, name,
			    va_arg(ap, uint64_t *), nelem);
			break;
		case DATA_TYPE_STRING:
			ret = nvlist_add_string(payload, name,
			    va_arg(ap, char *));
			break;
		case DATA_TYPE_STRING_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_string_array(payload, name,
			    va_arg(ap, const char **), nelem);
			break;
		case DATA_TYPE_NVLIST:
			ret = nvlist_add_nvlist(payload, name,
			    va_arg(ap, nvlist_t *));
			break;
		case DATA_TYPE_NVLIST_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_nvlist_array(payload, name,
			    va_arg(ap, const nvlist_t **), nelem);
			break;
		default:
			ret = EINVAL;
		}

		name = va_arg(ap, char *);
	}
	return (ret);
}

void
fm_payload_set(nvlist_t *payload, ...)
{
	int ret;
	const char *name;
	va_list ap;

	va_start(ap, payload);
	name = va_arg(ap, char *);
	ret = i_fm_payload_set(payload, name, ap);
	va_end(ap);

	if (ret)
		atomic_inc_64(&erpt_kstat_data.payload_set_failed.value.ui64);
}
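
/*
 * The variable argument list consists of (name, DATA_TYPE_*, value) groups,
 * with an element count preceding the array pointer for array types, all
 * terminated by a NULL name. For example (sketch; the member names are
 * illustrative):
 *
 *	fm_payload_set(payload,
 *	    "my_offset", DATA_TYPE_UINT64, offset,
 *	    "my_path", DATA_TYPE_STRING, path,
 *	    NULL);
 */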

/*
 * Set-up and validate the members of an ereport event according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	class			string		ereport
 *	version			uint8_t		0
 *	ena			uint64_t	<ena>
 *	detector		nvlist_t	<detector>
 *	ereport-payload		nvlist_t	<var args>
 *
 * We don't actually add a 'version' member to the payload. Really,
 * the version quoted to us by our caller is that of the category 1
 * "ereport" event class (and we require FM_EREPORT_VERS0) but
 * the payload version of the actual leaf class event under construction
 * may be something else. Callers should supply a version in the varargs,
 * or (better) we could take two version arguments - one for the
 * ereport category 1 classification (expect FM_EREPORT_VERS0) and one
 * for the leaf class.
 */
void
fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
    uint64_t ena, const nvlist_t *detector, ...)
{
	char ereport_class[FM_MAX_CLASS];
	const char *name;
	va_list ap;
	int ret;

	if (version != FM_EREPORT_VERS0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		return;
	}

	(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
	    FM_EREPORT_CLASS, erpt_class);
	if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
	}

	if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
	    (nvlist_t *)detector) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
	}

	va_start(ap, detector);
	name = va_arg(ap, const char *);
	ret = i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (ret)
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
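
/*
 * For instance (sketch), posting in the hypothetical leaf class
 * "io.timeout" produces an event whose FM_CLASS member is
 * "ereport.io.timeout":
 *
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "io.timeout",
 *	    fm_ena_generate(0, FM_ENA_FMT1), detector,
 *	    "my_elapsed_ns", DATA_TYPE_UINT64, elapsed, NULL);
 */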

/*
 * Set-up and validate the members of an hc fmri according to:
 *
 *	Member name		Type		Value
 *	===================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	hc-name			string		<name>
 *	hc-id			string		<id>
 *
 * Note that auth and hc-id are optional members.
 */

#define	HC_MAXPAIRS	20
#define	HC_MAXNAMELEN	50

static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
	if (version != FM_HC_SCHEME_VERSION) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
	    nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	return (1);
}

void
fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	va_list ap;
	int i;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = 0; i < npairs; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];

		(void) snprintf(idstr, sizeof (idstr), "%u", id);

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
	va_end(ap);

	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST,
	    (const nvlist_t **)pairs, npairs) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
	}

	for (i = 0; i < npairs; i++)
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}
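
/*
 * For example (sketch), an hc fmri naming the second dimm on the first
 * motherboard could be constructed as:
 *
 *	fm_fmri_hc_set(fmri, FM_HC_SCHEME_VERSION, auth, NULL, 2,
 *	    "motherboard", 0, "dimm", 1);
 *
 * which yields hc-list = [{hc-name="motherboard", hc-id="0"},
 * {hc-name="dimm", hc-id="1"}].
 */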

void
fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	nvlist_t **hcl;
	uint_t n;
	int i, j;
	va_list ap;
	const char *hcname, *hcid;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	/*
	 * copy the bboard nvpairs to the pairs array
	 */
	if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
	    != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	for (i = 0; i < n; i++) {
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
		    &hcname) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
	}

	/*
	 * create the pairs from passed in pairs
	 */
	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = n; i < npairs + n; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];
		(void) snprintf(idstr, sizeof (idstr), "%u", id);
		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			va_end(ap);
			return;
		}
	}
	va_end(ap);

	/*
	 * Create the fmri hc list
	 */
	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST,
	    (const nvlist_t **)pairs, npairs + n) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	for (i = 0; i < npairs + n; i++) {
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
	}

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
	}
}

/*
 * Set-up and validate the members of a dev fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	devpath			string		<devpath>
 *	[devid]			string		<devid>
 *	[target-port-l0id]	string		<target-port-lun0-id>
 *
 * Note that auth and devid are optional members.
 */
void
fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
    const char *devpath, const char *devid, const char *tpl0)
{
	int err = 0;

	if (version != DEV_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
	err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);

	if (auth != NULL) {
		err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth);
	}

	err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);

	if (devid != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);

	if (tpl0 != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);

	if (err)
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}

/*
 * Set-up and validate the members of a cpu fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	cpuid			uint32_t	<cpu_id>
 *	cpumask			uint8_t		<cpu_mask>
 *	serial			uint64_t	<serial_id>
 *
 * Note that auth, cpumask, serial are optional members.
 *
 */
void
fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
    uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
{
	uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;

	if (version < CPU_SCHEME_VERSION1) {
		atomic_inc_64(failedp);
		return;
	}

	if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
		atomic_inc_64(failedp);
		return;
	}

	if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
	    FM_FMRI_SCHEME_CPU) != 0) {
		atomic_inc_64(failedp);
		return;
	}

	if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0)
		atomic_inc_64(failedp);

	if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
		atomic_inc_64(failedp);

	if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
	    *cpu_maskp) != 0)
		atomic_inc_64(failedp);

	if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
	    FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
		atomic_inc_64(failedp);
}

/*
 * Set-up and validate the members of a mem fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>		[optional]
 *	unum			string		<unum>
 *	serial			string		<serial>	[optional*]
 *	offset			uint64_t	<offset>	[optional]
 *
 *	* serial is required if offset is present
 */
void
fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    const char *unum, const char *serial, uint64_t offset)
{
	if (version != MEM_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (!serial && (offset != (uint64_t)-1)) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (auth != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}

	if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
	}

	if (serial != NULL) {
		if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
		    (const char **)&serial, 1) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
		if (offset != (uint64_t)-1 && nvlist_add_uint64(fmri,
		    FM_FMRI_MEM_OFFSET, offset) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}

void
fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
    uint64_t vdev_guid)
{
	if (version != ZFS_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
	}

	if (vdev_guid != 0) {
		if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}

uint64_t
fm_ena_increment(uint64_t ena)
{
	uint64_t new_ena;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
		break;
	case FM_ENA_FMT2:
		new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
		break;
	default:
		new_ena = 0;
	}

	return (new_ena);
}

uint64_t
fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
{
	uint64_t ena = 0;

	switch (format) {
	case FM_ENA_FMT1:
		if (timestamp) {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((timestamp << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		} else {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((gethrtime() << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		}
		break;
	case FM_ENA_FMT2:
		ena = (uint64_t)((format & ENA_FORMAT_MASK) |
		    ((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
		break;
	default:
		break;
	}

	return (ena);
}

uint64_t
fm_ena_generate(uint64_t timestamp, uchar_t format)
{
	uint64_t ena;

	kpreempt_disable();
	ena = fm_ena_generate_cpu(timestamp, getcpuid(), format);
	kpreempt_enable();

	return (ena);
}

uint64_t
fm_ena_generation_get(uint64_t ena)
{
	uint64_t gen;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
		break;
	case FM_ENA_FMT2:
		gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
		break;
	default:
		gen = 0;
		break;
	}

	return (gen);
}

uchar_t
fm_ena_format_get(uint64_t ena)
{
	return (ENA_FORMAT(ena));
}

uint64_t
fm_ena_id_get(uint64_t ena)
{
	uint64_t id;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
		break;
	case FM_ENA_FMT2:
		id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
		break;
	default:
		id = 0;
	}

	return (id);
}

uint64_t
fm_ena_time_get(uint64_t ena)
{
	uint64_t time;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
		break;
	case FM_ENA_FMT2:
		time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
		break;
	default:
		time = 0;
	}

	return (time);
}
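
/*
 * Taken together, the accessors above decompose an ENA. For example
 * (sketch), a format 1 ENA from fm_ena_generate() can be picked apart as:
 *
 *	uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);
 *	uchar_t fmt = fm_ena_format_get(ena);		(FM_ENA_FMT1)
 *	uint64_t time = fm_ena_time_get(ena);		(gethrtime() bits)
 *	uint64_t gen = fm_ena_generation_get(ena);	(0 until incremented)
 */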

#ifdef _KERNEL
/*
 * Helper function to increment ereport dropped count. Used by the event
 * rate limiting code to give feedback to the user about how many events were
 * rate limited by including them in the 'dropped' count.
 */
void
fm_erpt_dropped_increment(void)
{
	atomic_inc_64(&ratelimit_dropped);
}

void
fm_init(void)
{
	zevent_len_cur = 0;
	zevent_flags = 0;

	/* Initialize zevent allocation and generation kstats */
	fm_ksp = kstat_create("zfs", 0, "fm", "misc", KSTAT_TYPE_NAMED,
	    sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (fm_ksp != NULL) {
		fm_ksp->ks_data = &erpt_kstat_data;
		kstat_install(fm_ksp);
	} else {
		cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
	}

	mutex_init(&zevent_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zevent_list, sizeof (zevent_t),
	    offsetof(zevent_t, ev_node));
	cv_init(&zevent_cv, NULL, CV_DEFAULT, NULL);

	zfs_ereport_init();
}

void
fm_fini(void)
{
	uint_t count;

	zfs_ereport_fini();

	zfs_zevent_drain_all(&count);

	mutex_enter(&zevent_lock);
	cv_broadcast(&zevent_cv);

	zevent_flags |= ZEVENT_SHUTDOWN;
	while (zevent_waiters > 0) {
		mutex_exit(&zevent_lock);
		kpreempt(KPREEMPT_SYNC);
		mutex_enter(&zevent_lock);
	}
	mutex_exit(&zevent_lock);

	cv_destroy(&zevent_cv);
	list_destroy(&zevent_list);
	mutex_destroy(&zevent_lock);

	if (fm_ksp != NULL) {
		kstat_delete(fm_ksp);
		fm_ksp = NULL;
	}
}
#endif /* _KERNEL */

ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, len_max, UINT, ZMOD_RW,
	"Max event queue length");