Some nvlist_t could be leaked in error handling paths.
Also make sure the cb argument to zfs_zevent_post() cannot
be NULL.
Signed-off-by: Isaac Huang <he.huang@intel.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #2158
extern void fm_init(void);
extern void fm_fini(void);
extern void fm_nvprint(nvlist_t *);
-extern void zfs_zevent_post(nvlist_t *, nvlist_t *, zevent_cb_t *);
+extern int zfs_zevent_post(nvlist_t *, nvlist_t *, zevent_cb_t *);
extern void zfs_zevent_drain_all(int *);
extern int zfs_zevent_fd_hold(int, minor_t *, zfs_zevent_t **);
extern void zfs_zevent_fd_rele(int);
}
/*
- * Post a zevent
+ * Post a zevent. The cb will be called when nvl and detector are no longer
+ * needed, i.e.:
+ * - An error happened and a zevent can't be posted. In this case, cb is called
+ * before zfs_zevent_post() returns.
+ * - The event is being drained and freed.
*/
-void
+int
zfs_zevent_post(nvlist_t *nvl, nvlist_t *detector, zevent_cb_t *cb)
{
int64_t tv_array[2];
uint64_t eid;
size_t nvl_size = 0;
zevent_t *ev;
+ int error;
+
+ ASSERT(cb != NULL);
gethrestime(&tv);
tv_array[0] = tv.tv_sec;
tv_array[1] = tv.tv_nsec;
- if (nvlist_add_int64_array(nvl, FM_EREPORT_TIME, tv_array, 2)) {
+
+ error = nvlist_add_int64_array(nvl, FM_EREPORT_TIME, tv_array, 2);
+ if (error) {
atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
- return;
+ goto out;
}
eid = atomic_inc_64_nv(&zevent_eid);
- if (nvlist_add_uint64(nvl, FM_EREPORT_EID, eid)) {
+ error = nvlist_add_uint64(nvl, FM_EREPORT_EID, eid);
+ if (error) {
atomic_add_64(&erpt_kstat_data.erpt_set_failed.value.ui64, 1);
- return;
+ goto out;
+ }
+
+ error = nvlist_size(nvl, &nvl_size, NV_ENCODE_NATIVE);
+ if (error) {
+ atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
+ goto out;
}
- (void) nvlist_size(nvl, &nvl_size, NV_ENCODE_NATIVE);
if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
- return;
+ error = EOVERFLOW;
+ goto out;
}
if (zfs_zevent_console)
ev = zfs_zevent_alloc();
if (ev == NULL) {
atomic_add_64(&erpt_kstat_data.erpt_dropped.value.ui64, 1);
- return;
+ error = ENOMEM;
+ goto out;
}
ev->ev_nvl = nvl;
zfs_zevent_insert(ev);
cv_broadcast(&zevent_cv);
mutex_exit(&zevent_lock);
+
+out:
+ if (error)
+ cb(nvl, detector);
+
+ return (error);
}
static int
fm_nvlist_destroy(detector, FM_NVA_FREE);
}
+/*
+ * No-op zevent_cb_t for callers that have nothing to free; satisfies
+ * zfs_zevent_post()'s requirement (ASSERT) that cb must not be NULL.
+ */
+static void
+zfs_zevent_post_cb_noop(nvlist_t *nvl, nvlist_t *detector)
+{
+}
+
static void
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);
if (report->zcr_ereport == NULL) {
- report->zcr_free(report->zcr_cbdata, report->zcr_cbinfo);
- if (report->zcr_ckinfo != NULL) {
- kmem_free(report->zcr_ckinfo,
- sizeof (*report->zcr_ckinfo));
- }
- kmem_free(report, sizeof (*report));
+ zfs_ereport_free_checksum(report);
return;
}
#endif
const void *good_data, const void *bad_data, boolean_t drop_if_identical)
{
#ifdef _KERNEL
- zfs_ecksum_info_t *info = NULL;
+ zfs_ecksum_info_t *info;
+
info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
good_data, bad_data, report->zcr_length, drop_if_identical);
-
if (info != NULL)
zfs_zevent_post(report->zcr_ereport,
report->zcr_detector, zfs_zevent_post_cb);
+ else
+ zfs_zevent_post_cb(report->zcr_ereport, report->zcr_detector);
report->zcr_ereport = report->zcr_detector = NULL;
if (info != NULL)
zfs_ereport_send_interim_checksum(zio_cksum_report_t *report)
{
#ifdef _KERNEL
- zfs_zevent_post(report->zcr_ereport, report->zcr_detector, NULL);
+ zfs_zevent_post(report->zcr_ereport, report->zcr_detector,
+ zfs_zevent_post_cb_noop);
#endif
}