/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2016, 2017, Intel Corporation.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 */
/*
 * ZFS syseventd module.
 *
 * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 * 1. Search for any vdevs whose devid matches that of the newly added
 *    device.
 *
 * 2. If no vdevs are found, then search for any vdevs whose udev path
 *    matches that of the new device.
 *
 * 3. If no vdevs match by either method, then ignore the event.
 *
 * 4. Attempt to online the device with a flag to indicate that it should
 *    be unspared when resilvering completes.  If this succeeds, then the
 *    same device was inserted and we should continue normally.
 *
 * 5. If the pool does not have the 'autoreplace' property set, attempt to
 *    online the device again without the unspare flag, which will
 *    generate a FMA fault.
 *
 * 6. If the pool has the 'autoreplace' property set, and the matching vdev
 *    is a whole disk, then label the new disk and attempt a 'zpool
 *    replace'.
 *
 * The module responds to EC_DEV_ADD events.  The special ESC_ZFS_VDEV_CHECK
 * event indicates that a device failed to open during pool load, but the
 * autoreplace property was set.  In this case, we deferred the associated
 * FMA fault until our module had a chance to process the autoreplace logic.
 * If the device could not be replaced, then the second online attempt will
 * trigger the FMA fault that we skipped earlier.
 *
 * ZFS on Linux porting notes:
 *	Linux udev provides an insert event for both the disk and each of
 *	its partitions.
 */
#include <errno.h>
#include <libnvpair.h>
#include <libzfs.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include "zfs_agents.h"
#include "../zed_log.h"
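
/*
 * udev-maintained symlink directories; used below to translate the
 * physical path or devid carried in an event into a current /dev node.
 */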
#define	DEV_BYID_PATH	"/dev/disk/by-id/"
#define	DEV_BYPATH_PATH	"/dev/disk/by-path/"
#define	DEV_BYVDEV_PATH	"/dev/disk/by-vdev/"
typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
libzfs_handle_t *g_zfshdl;
list_t g_pool_list;	/* list of unavailable pools at initialization */
list_t g_device_list;	/* list of disks with asynchronous label request */
tpool_t *g_tpool;	/* thread pool used to enable datasets (see below) */
boolean_t g_enumeration_done;
pthread_t g_zfs_tid;	/* zfs_enum_pools() thread */
typedef struct unavailpool {
	zpool_handle_t	*uap_zhp;
	list_node_t	uap_node;
} unavailpool_t;
typedef struct pendingdev {
	char		pd_physpath[128];
	list_node_t	pd_node;
} pendingdev_t;
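
/*
 * Return the state of a pool's top-level vdev tree, as recorded in the
 * vdev stats of the cached pool configuration.
 */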
static vdev_state_t
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	unsigned int c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}
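
/*
 * zpool_iter() callback invoked at startup: pools whose top-level vdev
 * state is below DEGRADED (i.e. unavailable) are queued on the list
 * passed in through 'data' so their datasets can be enabled later, once
 * the pool becomes available again.
 */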
static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
	zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
	    zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));

	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
		unavailpool_t *uap;

		uap = malloc(sizeof (unavailpool_t));
		uap->uap_zhp = zhp;
		list_insert_tail((list_t *)data, uap);
	} else {
		zpool_close(zhp);
	}
	return (0);
}
/*
 * Two stage replace on Linux
 * since we get disk notifications
 * we can wait for the partitioned disk slice to show up!
 *
 * First stage tags the disk, initiates async partitioning, and returns
 * Second stage finds the tag and proceeds to ZFS labeling/replace
 *
 * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
 *
 * 1. physical match with no fs, no partition
 *	tag it top, partition disk
 *
 * 2. physical match again, see partition and tag
 *	proceed to label and replace
 */
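
/*
 * Example (illustrative only): a new whole disk arriving as
 * /dev/disk/by-path/pci-...-lun-0 is labeled and its physpath tagged in
 * g_device_list; the partition-add event that follows for its -part1
 * slice finds the tag and resumes with the ZFS label/replace step.
 */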
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * Can also arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	uint64_t guid = 0ULL;
	char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char devpath[PATH_MAX];
	int ret;
	int is_dm = 0;
	int is_sd = 0;
	uint_t c;
	vdev_stat_t *vs;
	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);
		return;
	}
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);

	if (offline)
		return;	/* don't intervene if it was taken offline */
	is_dm = zfs_dev_is_dm(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " wholedisk %d, dm %d (%llu)", zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL", wholedisk, is_dm,
	    (long long unsigned int)guid);

	/*
	 * The VDEV guid is preferred for identification (gets passed in path)
	 */
	if (guid != 0) {
		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
		    (long long unsigned int)guid);
	} else {
		/*
		 * otherwise use path sans partition suffix for whole disks
		 */
		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			char *spath = zfs_strip_partition(fullpath);

			if (spath == NULL) {
				zed_log_msg(LOG_INFO, "%s: Can't alloc",
				    __func__);
				return;
			}

			(void) strlcpy(fullpath, spath, sizeof (fullpath));
			free(spath);
		}
	}
	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO, "  zpool_vdev_online: vdev %s is %s",
		    fullpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
		return;
	}
	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = 1;

	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_dm) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a whole disk for '%s'", fullpath);
		return;
	}
	/*
	 * Convert physical path into its current device node.  Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, devpath) == NULL && !is_dm) {
		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);

		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
		    fullpath, libzfs_error_description(g_zfshdl));
		return;
	}
	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		return;
	}

	nvlist_lookup_string(vdev, "new_devid", &new_devid);
	if (is_dm) {
		/* Don't label device mapper or multipath disks. */
	} else if (!labeled) {
		/*
		 * we're auto-replacing a raw disk, so label it first
		 */
		char *leafname;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * to map the physical string that was matched on to the
		 * underlying device node.
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		leafname = strrchr(devpath, '/') + 1;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.
		 */
		if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
			zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
			    "label '%s' (%s)", leafname,
			    libzfs_error_description(g_zfshdl));

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		/*
		 * The disk labeling is asynchronous on Linux.  Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		device = malloc(sizeof (pendingdev_t));
		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */
	} else /* labeled */ {
		boolean_t found = B_FALSE;
		/*
		 * match up with request above to label the disk
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);
				free(device);
				found = B_TRUE;
				break;
			}
			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);
		}
		if (!found) {
			/* unexpected partition slice encountered */
			zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",
			    fullpath);
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);

		(void) snprintf(devpath, sizeof (devpath), "%s%s",
		    DEV_BYID_PATH, new_devid);
	}
	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		return;
	}
	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		nvlist_free(nvroot);
		return;
	}
	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	/*
	 * Wait for udev to verify the links exist, then auto-replace
	 * the leaf disk at same physical location.
	 */
	if (zpool_label_disk_wait(path, 3000) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
		    "disk %s is missing", path);
		nvlist_free(nvroot);
		return;
	}

	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));

	nvlist_free(nvroot);
}
/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char		*dd_compare;
	const char		*dd_prop;
	zfs_process_func_t	dd_func;
	boolean_t		dd_found;
	boolean_t		dd_islabeled;
	uint64_t		dd_pool_guid;
	uint64_t		dd_vdev_guid;
	const char		*dd_new_devid;
} dev_data_t;
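
/*
 * Recursively walk a vdev tree, applying dd_func to the first leaf vdev
 * that matches either by guid or by the nvlist property named in dd_prop.
 */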
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
	dev_data_t *dp = data;
	char *path = NULL;
	uint_t c, children;
	nvlist_t **child;

	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}
	/* once a vdev was matched and processed there is nothing left to do */
	if (dp->dd_found)
		return;

	/*
	 * Match by GUID if available otherwise fallback to devid or physical
	 */
	if (dp->dd_vdev_guid != 0) {
		uint64_t guid;

		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid != dp->dd_vdev_guid) {
			return;
		}
		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched on %llu", guid);
		dp->dd_found = B_TRUE;

	} else if (dp->dd_compare != NULL) {
		/*
		 * NOTE: On Linux there is an event for partition, so unlike
		 * illumos, substring matching is not required to accommodate
		 * the partition suffix.  An exact match will be present in
		 * the dp->dd_compare value.
		 */
		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strcmp(dp->dd_compare, path) != 0)
			return;

		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched %s on %s",
		    dp->dd_prop, path);
		dp->dd_found = B_TRUE;

		/* pass the new devid for use by replacing code */
		if (dp->dd_new_devid != NULL) {
			(void) nvlist_add_string(nvl, "new_devid",
			    dp->dd_new_devid);
		}
	}

	(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}
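
/*
 * Thread-pool callback: mount and share the datasets of a pool that has
 * transitioned back to an available state, then release its handle.
 */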
static void *
zfs_enable_ds(void *arg)
{
	unavailpool_t *pool = (unavailpool_t *)arg;

	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
	zpool_close(pool->uap_zhp);
	free(pool);
	return (NULL);
}
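
/*
 * zpool_iter() callback: search one pool's vdev tree for a match (see
 * zfs_iter_vdev) and, once enumeration is complete, move any pool that is
 * no longer unavailable off g_pool_list and enable its datasets.
 */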
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;
	unavailpool_t *pool;

	zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
	    zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);

	/*
	 * For each vdev in this pool, look for a match to apply dd_func
	 */
	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	}

	/*
	 * if this pool was originally unavailable,
	 * then enable its datasets asynchronously
	 */
	if (g_enumeration_done) {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {

			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)))
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				list_remove(&g_pool_list, pool);
				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
				    pool);
				break;
			}
		}
	}

	zpool_close(zhp);
	return (dp->dd_found);	/* cease iteration after a match */
}
/*
 * Given a physical device location, iterate over all
 * (pool, vdev) pairs which correspond to that location.
 */
static boolean_t
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
    boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = physical;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;	/* used by auto replace code */

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}
/*
 * Given a device identifier, find any vdevs with a matching devid.
 * On Linux we can match devid directly which is always a whole disk.
 */
static boolean_t
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = devid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}
/*
 * Handle an EC_DEV_ADD.ESC_DISK event.
 *
 * illumos
 *	Expects: DEV_PHYS_PATH string in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/dsk/c0t1d0s0' (persistent)
 *	devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
 *	phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
 *
 * linux
 *	provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/sdc1' (not persistent)
 *	devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
 *	phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
 */
static int
zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
{
	char *devpath = NULL, *devid;
	boolean_t is_slice;

	/*
	 * Expecting a devid string and an optional physical location
	 */
	if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0)
		return (-1);

	(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);

	is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);

	zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
	    devid, devpath ? devpath : "NULL", is_slice);

	/*
	 * Iterate over all vdevs looking for a match in the following order:
	 * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
	 * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
	 *
	 * For disks, we only want to pay attention to vdevs marked as whole
	 * disks or multipath devices.
	 */
	if (!devid_iter(devid, zfs_process_add, is_slice) && devpath != NULL)
		(void) devphys_iter(devpath, devid, zfs_process_add, is_slice);

	return (0);
}
/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could
 * not be opened during initial pool open, but the autoreplace property was
 * set on the pool.  In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0 ||
	    data.dd_vdev_guid == 0)
		return (0);

	zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
	    data.dd_pool_guid, data.dd_vdev_guid);

	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}
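
/*
 * zpool_iter() callback for the ESC_DEV_DLE (disk link event) path: find
 * the vdev named by 'data' (a guid string or physical path), reopen the
 * pool so the kernel picks up the device's new size and, if autoexpand
 * is set, online the vdev to trigger the expansion.
 */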
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
	char *devname = data;
	boolean_t avail_spare, l2cache;
	nvlist_t *tgt;
	int error;

	zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
	    devname, zpool_get_name(zhp));

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		char *path, fullpath[MAXPATHLEN];
		uint64_t wholedisk;

		error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
		if (error) {
			zpool_close(zhp);
			return (0);
		}

		error = nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		if (error)
			wholedisk = 0;

		if (wholedisk) {
			path = strrchr(path, '/');
			if (path != NULL) {
				path = zfs_strip_partition(path + 1);
				if (path == NULL) {
					zpool_close(zhp);
					return (0);
				}
			} else {
				zpool_close(zhp);
				return (0);
			}

			(void) strlcpy(fullpath, path, sizeof (fullpath));
			free(path);

			/*
			 * We need to reopen the pool associated with this
			 * device so that the kernel can update the size of
			 * the expanded device.  When expanding there is no
			 * need to restart the scrub from the beginning.
			 */
			boolean_t scrub_restart = B_FALSE;
			(void) zpool_reopen_one(zhp, &scrub_restart);
		} else {
			(void) strlcpy(fullpath, path, sizeof (fullpath));
		}

		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			vdev_state_t newstate;

			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
				error = zpool_vdev_online(zhp, fullpath, 0,
				    &newstate);
				zed_log_msg(LOG_INFO, "zfsdle_vdev_online: "
				    "setting device '%s' to ONLINE state "
				    "in pool '%s': %d", fullpath,
				    zpool_get_name(zhp), error);
			}
		}
		zpool_close(zhp);
		return (1);
	}
	zpool_close(zhp);
	return (0);
}
/*
 * This function handles the ESC_DEV_DLE device change event.  Use the
 * provided vdev guid when looking up a disk or partition; when the guid
 * is not present, assume the entire disk is owned by ZFS and append the
 * expected -part1 partition information, then look up by physical path.
 */
static int
zfs_deliver_dle(nvlist_t *nvl)
{
	char *devname, name[MAXPATHLEN];
	uint64_t guid;

	if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
		sprintf(name, "%llu", (u_longlong_t)guid);
	} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
		strlcpy(name, devname, MAXPATHLEN);
		zfs_append_partition(name, MAXPATHLEN);
	} else {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
	}

	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, name) != 1) {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
		    "found", name);
		return (1);
	}
	return (0);
}
/*
 * syseventd daemon module event handler
 *
 * Handles syseventd daemon zfs device related events:
 *
 *	EC_DEV_ADD.ESC_DISK
 *	EC_DEV_STATUS.ESC_DEV_DLE
 *	EC_ZFS.ESC_ZFS_VDEV_CHECK
 *
 * Note: assumes only one thread active at a time (not thread safe)
 */
static int
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
{
	int ret;
	boolean_t is_lofi = B_FALSE, is_check = B_FALSE, is_dle = B_FALSE;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/*
		 * We're mainly interested in disk additions, but we also listen
		 * for new loop devices, to allow for simplified testing.
		 */
		if (strcmp(subclass, ESC_DISK) == 0)
			is_lofi = B_FALSE;
		else if (strcmp(subclass, ESC_LOFI) == 0)
			is_lofi = B_TRUE;
		else
			return (0);

		is_check = B_FALSE;
	} else if (strcmp(class, EC_ZFS) == 0 &&
	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
		/*
		 * This event signifies that a device failed to open
		 * during pool load, but the 'autoreplace' property was
		 * set, so we should pretend it's just been added.
		 */
		is_check = B_TRUE;
	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
	    strcmp(subclass, ESC_DEV_DLE) == 0) {
		is_dle = B_TRUE;
	} else {
		return (0);
	}

	if (is_dle)
		ret = zfs_deliver_dle(nvl);
	else if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl, is_lofi);

	return (ret);
}
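
/*
 * Startup thread body: enumerate pools once so any that are unavailable
 * at initialization end up on g_pool_list (see zfs_unavail_pool).
 */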
static void *
zfs_enum_pools(void *arg)
{
	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
	/*
	 * Linux - instead of using a thread pool, each list entry
	 * will spawn a thread when an unavailable pool transitions
	 * to available.  zfs_slm_fini will wait for these threads.
	 */
	g_enumeration_done = B_TRUE;
	return (NULL);
}
/*
 * called from zed daemon at startup
 *
 * sent messages from zevents or udev monitor
 *
 * For now, each agent has its own libzfs instance
 */
int
zfs_slm_init()
{
	if ((g_zfshdl = libzfs_init()) == NULL)
		return (-1);

	/*
	 * collect a list of unavailable pools (asynchronously,
	 * since this can take a while)
	 */
	list_create(&g_pool_list, sizeof (struct unavailpool),
	    offsetof(struct unavailpool, uap_node));

	if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
		list_destroy(&g_pool_list);
		libzfs_fini(g_zfshdl);
		return (-1);
	}

	list_create(&g_device_list, sizeof (struct pendingdev),
	    offsetof(struct pendingdev, pd_node));

	return (0);
}
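
/*
 * called from zed daemon at exit: reap the enumeration thread, drain the
 * thread pool, release any queued pools and pending devices, and close
 * the libzfs handle.
 */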
void
zfs_slm_fini()
{
	unavailpool_t *pool;
	pendingdev_t *device;

	/* wait for zfs_enum_pools thread to complete */
	(void) pthread_join(g_zfs_tid, NULL);

	/* destroy the thread pool */
	if (g_tpool != NULL) {
		tpool_wait(g_tpool);
		tpool_destroy(g_tpool);
	}

	while ((pool = (list_head(&g_pool_list))) != NULL) {
		list_remove(&g_pool_list, pool);
		zpool_close(pool->uap_zhp);
		free(pool);
	}
	list_destroy(&g_pool_list);

	while ((device = (list_head(&g_device_list))) != NULL) {
		list_remove(&g_device_list, device);
		free(device);
	}
	list_destroy(&g_device_list);

	libzfs_fini(g_zfshdl);
}
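
/*
 * zed agent entry point: log the event class/subclass pair and hand the
 * event to the dispatcher above.
 */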
void
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
{
	zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
	(void) zfs_slm_deliver_event(class, subclass, nvl);
}