/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2016, 2017, Intel Corporation.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 */
/*
 * ZFS syseventd module.
 *
 * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 *	1. Search for any vdevs whose devid matches that of the newly added
 *	   device.
 *
 *	2. If no vdevs are found, then search for any vdevs whose udev path
 *	   matches that of the new device.
 *
 *	3. If no vdevs match by either method, then ignore the event.
 *
 *	4. Attempt to online the device with a flag to indicate that it should
 *	   be unspared when resilvering completes.  If this succeeds, then the
 *	   same device was inserted and we should continue normally.
 *
 *	5. If the pool does not have the 'autoreplace' property set, attempt to
 *	   online the device again without the unspare flag, which will
 *	   generate a FMA fault.
 *
 *	6. If the pool has the 'autoreplace' property set, and the matching vdev
 *	   is a whole disk, then label the new disk and attempt a 'zpool
 *	   replace'.
 *
 * The module responds to EC_DEV_ADD events.  The special ESC_ZFS_VDEV_CHECK
 * event indicates that a device failed to open during pool load, but the
 * autoreplace property was set.  In this case, we deferred the associated
 * FMA fault until our module had a chance to process the autoreplace logic.
 * If the device could not be replaced, then the second online attempt will
 * trigger the FMA fault that we skipped earlier.
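 *
 * For example, steps 5 and 6 above only diverge when the pool's autoreplace
 * property has been enabled by the administrator (illustrative pool name):
 *
 *	zpool set autoreplace=on tank
 *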
 * ZFS on Linux porting notes:
 *	In lieu of a thread pool, just spawn a thread on demand.
 *	Linux udev provides a disk insert for both the disk and the partition.
 */
#include <libnvpair.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>

#include "zfs_agents.h"
#include "../zed_log.h"

#define	DEV_BYID_PATH	"/dev/disk/by-id/"
#define	DEV_BYPATH_PATH	"/dev/disk/by-path/"
#define	DEV_BYVDEV_PATH	"/dev/disk/by-vdev/"
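
/*
 * Callback invoked for each (pool, vdev) pair that matches a newly added
 * device; the boolean_t argument indicates whether the device arrived
 * already labeled (i.e. this is the partition event of a two-stage replace).
 */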
typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
libzfs_handle_t *g_zfshdl;
list_t g_pool_list;	/* list of unavailable pools at initialization */
list_t g_device_list;	/* list of disks with asynchronous label request */
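/* set once zfs_enum_pools() has finished scanning for unavailable pools */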
boolean_t g_enumeration_done;
typedef struct unavailpool {
	zpool_handle_t	*uap_zhp;
	pthread_t	uap_enable_tid;	/* dataset enable thread if activated */
	list_node_t	uap_node;
} unavailpool_t;
typedef struct pendingdev {
	char		pd_physpath[128];
	list_node_t	pd_node;
} pendingdev_t;
zfs_toplevel_state(zpool_handle_t *zhp)
	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
	zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
	    zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));

	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
		uap = malloc(sizeof (unavailpool_t));
		uap->uap_enable_tid = 0;
		list_insert_tail((list_t *)data, uap);
/*
 * Two stage replace on Linux
 * since we get disk notifications
 * we can wait for the partitioned disk slice to show up!
 *
 * First stage tags the disk, initiates async partitioning, and returns
 * Second stage finds the tag and proceeds to ZFS labeling/replace
 *
 * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
 *
 * 1. physical match with no fs, no partition
 *	tag it top, partition disk
 *
 * 2. physical match again, see partition and tag
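 *
 * As an illustrative walk-through (device names hypothetical): a hot-inserted
 * /dev/sdc first matches a whole-disk vdev by physical path, is labeled and
 * recorded on g_device_list; the later partition-add event for sdc1 finds
 * that record and completes the replace via zpool_vdev_attach().
 */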
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * Also can arrive here from an ESC_ZFS_VDEV_CHECK event
 */
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	uint64_t guid = 0ULL;
	char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char devpath[PATH_MAX];
	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

	if (offline)
		return;	/* don't intervene if it was taken offline */

	is_dm = zfs_dev_is_dm(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " wholedisk %d, dm %d (%llu)", zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL", wholedisk, is_dm,
	    (long long unsigned int)guid);
	/*
	 * The VDEV guid is preferred for identification (gets passed in path)
	 */
	(void) snprintf(fullpath, sizeof (fullpath), "%llu",
	    (long long unsigned int)guid);

	/*
	 * otherwise use path sans partition suffix for whole disks
	 */
	(void) strlcpy(fullpath, path, sizeof (fullpath));

	char *spath = zfs_strip_partition(fullpath);
	zed_log_msg(LOG_INFO, "%s: Can't alloc", __func__);

	(void) strlcpy(fullpath, spath, sizeof (fullpath));
	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO, " zpool_vdev_online: vdev %s is %s",
		    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = B_TRUE;

	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_dm) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a whole disk for '%s'", fullpath);
	/*
	 * Convert physical path into its current device node.  Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, devpath) == NULL && !is_dm) {
		zed_log_msg(LOG_INFO, " realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);

		zed_log_msg(LOG_INFO, " zpool_vdev_online: %s FORCEFAULT (%s)",
		    fullpath, libzfs_error_description(g_zfshdl));
	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		return;
	}

	nvlist_lookup_string(vdev, "new_devid", &new_devid);

	if (is_dm) {
		/* Don't label device mapper or multipath disks. */
	} else if (!labeled) {
		/*
		 * we're auto-replacing a raw disk, so label it first
		 */

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * to map the physical string that was matched on to the under
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 */
		leafname = strrchr(devpath, '/') + 1;
		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.
		 */
		if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
			zed_log_msg(LOG_INFO, " zpool_label_disk: could not "
			    "label '%s' (%s)", leafname,
			    libzfs_error_description(g_zfshdl));

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
		/*
		 * The disk labeling is asynchronous on Linux.  Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		device = malloc(sizeof (pendingdev_t));
		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_INFO, " zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */
	} else /* labeled */ {
		boolean_t found = B_FALSE;

		/*
		 * match up with request above to label the disk
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);

			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);
		/* unexpected partition slice encountered */
		zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
		    fullpath);

		(void) zpool_vdev_online(zhp, fullpath,
		    ZFS_ONLINE_FORCEFAULT, &newstate);

		zed_log_msg(LOG_INFO, " zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);
	(void) snprintf(devpath, sizeof (devpath), "%s%s",
	    DEV_BYID_PATH, new_devid);
	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");

	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
	/*
	 * auto replace a leaf disk at same physical location
	 */
	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	zed_log_msg(LOG_INFO, " zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));
/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char		*dd_compare;
	const char		*dd_prop;
	zfs_process_func_t	dd_func;
	boolean_t		dd_found;
	boolean_t		dd_islabeled;
	uint64_t		dd_pool_guid;
	uint64_t		dd_vdev_guid;
	const char		*dd_new_devid;
} dev_data_t;
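
/*
 * Typical usage (illustrative sketch only; see devid_iter() and
 * devphys_iter() below for the real helpers):
 *
 *	dev_data_t data = { 0 };
 *	data.dd_compare = devid;
 *	data.dd_prop = ZPOOL_CONFIG_DEVID;
 *	data.dd_func = zfs_process_add;
 *	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
 */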
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
	dev_data_t *dp = data;
	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	/* once a vdev was matched and processed there is nothing left to do */

	/*
	 * Match by GUID if available; otherwise fall back to devid or physical
	 * path.
	 */
	if (dp->dd_vdev_guid != 0) {
		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid != dp->dd_vdev_guid) {
			return;
		}
		zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched on %llu", guid);
		dp->dd_found = B_TRUE;
	} else if (dp->dd_compare != NULL) {
		/*
		 * NOTE: On Linux there is an event for partition, so unlike
		 * illumos, substring matching is not required to accommodate
		 * the partition suffix.  An exact match will be present in
		 * the dp->dd_compare value.
		 */
		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strcmp(dp->dd_compare, path) != 0)
			return;

		zed_log_msg(LOG_INFO, " zfs_iter_vdev: matched %s on %s",
		    dp->dd_prop, path);
		dp->dd_found = B_TRUE;
		/* pass the new devid for use by replacing code */
		if (dp->dd_new_devid != NULL) {
			(void) nvlist_add_string(nvl, "new_devid",
			    dp->dd_new_devid);
		}

	(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
zfs_enable_ds(void *arg)
	unavailpool_t *pool = (unavailpool_t *)arg;

	assert(pool->uap_enable_tid == pthread_self());

	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
	zpool_close(pool->uap_zhp);
	pool->uap_zhp = NULL;
	/* Note: zfs_slm_fini() will clean up this pool entry on exit */
zfs_iter_pool(zpool_handle_t *zhp, void *data)
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;

	zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
	    zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);
	/*
	 * For each vdev in this pool, look for a match to apply dd_func
	 */
	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
	/*
	 * if this pool was originally unavailable,
	 * then enable its datasets asynchronously
	 */
	if (g_enumeration_done) {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {

			if (pool->uap_enable_tid != 0)
				continue;	/* entry already processed */
			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)))
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				/* send to a background thread; keep on list */
				(void) pthread_create(&pool->uap_enable_tid,
				    NULL, zfs_enable_ds, pool);
	return (dp->dd_found);	/* cease iteration after a match */
/*
 * Given a physical device location, iterate over all
 * (pool, vdev) pairs which correspond to that location.
 */
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
    boolean_t is_slice)
	dev_data_t data = { 0 };

	data.dd_compare = physical;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;	/* used by auto replace code */

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
/*
 * Given a device identifier, find any vdevs with a matching devid.
 * On Linux we can match devid directly which is always a whole disk.
 */
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
	dev_data_t data = { 0 };

	data.dd_compare = devid;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
/*
 * Handle an EC_DEV_ADD.ESC_DISK event.
 *
 * illumos
 *	Expects: DEV_PHYS_PATH string in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/dsk/c0t1d0s0' (persistent)
 *	devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
 *	phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
 *
 * linux
 *	provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/sdc1' (not persistent)
 *	devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
 *	phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
 */
zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
	char *devpath = NULL, *devid;

	/*
	 * Expecting a devid string and an optional physical location
	 */
	if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0)
		return (-1);

	(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
	is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);

	zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
	    devid, devpath ? devpath : "NULL", is_slice);

	/*
	 * Iterate over all vdevs looking for a match in the following order:
	 * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
	 * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
	 *
	 * For disks, we only want to pay attention to vdevs marked as whole
	 * disks or as multipath devices.
	 */
	if (!devid_iter(devid, zfs_process_add, is_slice) && devpath != NULL)
		(void) devphys_iter(devpath, devid, zfs_process_add, is_slice);
/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool.  In this case, we treat it as if it were an add event.
 */
zfs_deliver_check(nvlist_t *nvl)
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0 ||
	    data.dd_vdev_guid == 0)

	zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
	    data.dd_pool_guid, data.dd_vdev_guid);

	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
	char *devname = data;
	boolean_t avail_spare, l2cache;
	vdev_state_t newstate;

	zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
	    devname, zpool_get_name(zhp));

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		char *path, fullpath[MAXPATHLEN];
		uint64_t wholedisk = 0ULL;

		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &path) == 0);
		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk) == 0);

		(void) strlcpy(fullpath, path, sizeof (fullpath));

		char *spath = zfs_strip_partition(fullpath);
		boolean_t scrub_restart = B_TRUE;
		zed_log_msg(LOG_INFO, "%s: Can't alloc", __func__);

		(void) strlcpy(fullpath, spath, sizeof (fullpath));

		/*
		 * We need to reopen the pool associated with this
		 * device so that the kernel can update the size
		 * of the expanded device.
		 */
		(void) zpool_reopen_one(zhp, &scrub_restart);
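
		/*
		 * Expansion beyond this point depends on the pool's
		 * autoexpand property, checked just below; an administrator
		 * enables it with (illustrative pool name):
		 *
		 *	zpool set autoexpand=on tank
		 */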
		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			zed_log_msg(LOG_INFO, "zfsdle_vdev_online: setting "
			    "device '%s' to ONLINE state in pool '%s'",
			    fullpath, zpool_get_name(zhp));
			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL)
				(void) zpool_vdev_online(zhp, fullpath, 0,
				    &newstate);
/*
 * This function handles the ESC_DEV_DLE event.
 */
zfs_deliver_dle(nvlist_t *nvl)
	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) != 0) {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: no physpath");

	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, devname) != 1) {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
		    "found", devname);
/*
 * syseventd daemon module event handler
 *
 * Handles syseventd daemon zfs device related events:
 *
 *	EC_DEV_ADD.ESC_DISK
 *	EC_DEV_STATUS.ESC_DEV_DLE
 *	EC_ZFS.ESC_ZFS_VDEV_CHECK
 *
 * Note: assumes only one thread active at a time (not thread safe)
 */
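
/*
 * For EC_DEV_ADD events the nvlist is expected to carry DEV_IDENTIFIER
 * (the devid), optionally DEV_PHYS_PATH, and DEV_IS_PART for partition
 * events; see zfs_deliver_add() above for how each key is consumed.
 */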
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
	boolean_t is_lofi = B_FALSE, is_check = B_FALSE, is_dle = B_FALSE;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/*
		 * We're mainly interested in disk additions, but we also listen
		 * for new loop devices, to allow for simplified testing.
		 */
		if (strcmp(subclass, ESC_DISK) == 0)
			is_lofi = B_FALSE;
		else if (strcmp(subclass, ESC_LOFI) == 0)
			is_lofi = B_TRUE;
	} else if (strcmp(class, EC_ZFS) == 0 &&
	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
		/*
		 * This event signifies that a device failed to open
		 * during pool load, but the 'autoreplace' property was
		 * set, so we should pretend it's just been added.
		 */
		is_check = B_TRUE;
	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
	    strcmp(subclass, ESC_DEV_DLE) == 0) {
		is_dle = B_TRUE;
	}

	if (is_dle)
		ret = zfs_deliver_dle(nvl);
	else if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl, is_lofi);
zfs_enum_pools(void *arg)
	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
	/*
	 * Linux - instead of using a thread pool, each list entry
	 * will spawn a thread when an unavailable pool transitions
	 * to available.  zfs_slm_fini will wait for these threads.
	 */
	g_enumeration_done = B_TRUE;
/*
 * called from zed daemon at startup
 *
 * sent messages from zevents or udev monitor
 *
 * For now, each agent has its own libzfs instance
 */
	if ((g_zfshdl = __libzfs_init()) == NULL)
		return (-1);

	/*
	 * collect a list of unavailable pools (asynchronously,
	 * since this can take a while)
	 */
	list_create(&g_pool_list, sizeof (struct unavailpool),
	    offsetof(struct unavailpool, uap_node));

	if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
		list_destroy(&g_pool_list);
		__libzfs_fini(g_zfshdl);
		return (-1);
	}

	list_create(&g_device_list, sizeof (struct pendingdev),
	    offsetof(struct pendingdev, pd_node));
	pendingdev_t *device;

	/* wait for zfs_enum_pools thread to complete */
	(void) pthread_join(g_zfs_tid, NULL);

	while ((pool = (list_head(&g_pool_list))) != NULL) {
		/*
		 * each pool entry has two possibilities
		 * 1. was made available (so wait for zfs_enable_ds thread)
		 * 2. still unavailable (just close the pool)
		 */
		if (pool->uap_enable_tid)
			(void) pthread_join(pool->uap_enable_tid, NULL);
		else if (pool->uap_zhp != NULL)
			zpool_close(pool->uap_zhp);

		list_remove(&g_pool_list, pool);
		free(pool);
	}
	list_destroy(&g_pool_list);

	while ((device = (list_head(&g_device_list))) != NULL) {
		list_remove(&g_device_list, device);
		free(device);
	}
	list_destroy(&g_device_list);

	__libzfs_fini(g_zfshdl);
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
	zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
	(void) zfs_slm_deliver_event(class, subclass, nvl);