git.proxmox.com Git - mirror_zfs.git/commitdiff
zed: unclean disk attachment faults the vdev
author Ameer Hamza <106930537+ixhamza@users.noreply.github.com>
Tue, 29 Nov 2022 17:24:10 +0000 (22:24 +0500)
committer GitHub <noreply@github.com>
Tue, 29 Nov 2022 17:24:10 +0000 (09:24 -0800)
If the attached disk already contains a vdev GUID, the
disk is not clean. In such a scenario, the physical path
would still match, and the disk would be faulted when
ZED tries to online it. So, only proceed if either the
GUID matches that of the last attached disk or the disk
is in a clean state.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Signed-off-by: Ameer Hamza <ahamza@ixsystems.com>
Closes #14181
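
The rule added by the zfs_mod.c hunks below boils down to: honor a
physical-path match only when the attached disk is clean (carries no
vdev GUID on its label) or its GUID equals the GUID of the pool vdev
it matched against. The following is a minimal standalone sketch of
that rule, not the actual ZED code; should_match_by_path() and its
parameter names are hypothetical and used for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * new_vdev_guid: GUID read from the newly attached disk's label
 *                (0 if the disk carries no label, i.e. it is clean).
 * slot_guid:     GUID of the pool vdev whose physical path matched.
 */
static bool
should_match_by_path(uint64_t new_vdev_guid, uint64_t slot_guid)
{
	/*
	 * A labeled disk is accepted only if its GUID matches the vdev
	 * it is being matched against; otherwise onlining it would
	 * fault the vdev.  A clean disk is always accepted.
	 */
	if (new_vdev_guid != 0 && new_vdev_guid != slot_guid)
		return (false);
	return (true);
}

int
main(void)
{
	printf("clean disk:            %d\n", should_match_by_path(0, 42));
	printf("same vdev re-attached: %d\n", should_match_by_path(42, 42));
	printf("foreign labeled disk:  %d\n", should_match_by_path(7, 42));
	return (0);
}

Compiled as-is this prints 1, 1, 0: a clean disk, or a disk whose label
GUID matches the vdev, proceeds; a disk labeled for a different vdev is
skipped rather than faulted.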

cmd/zed/agents/zfs_mod.c
module/zfs/vdev.c

cmd/zed/agents/zfs_mod.c
index 0271fffe42ef7b2d43d1fde410b538384ac4863a..4fc8ceb9fb44d159b59f70c39a9119fc9df794c0 100644 (file)
@@ -535,6 +535,7 @@ typedef struct dev_data {
        boolean_t               dd_islabeled;
        uint64_t                dd_pool_guid;
        uint64_t                dd_vdev_guid;
+       uint64_t                dd_new_vdev_guid;
        const char              *dd_new_devid;
 } dev_data_t;
 
@@ -545,6 +546,7 @@ zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
        char *path = NULL;
        uint_t c, children;
        nvlist_t **child;
+       uint64_t guid = 0;
 
        /*
         * First iterate over any children.
@@ -572,17 +574,14 @@ zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
        /* once a vdev was matched and processed there is nothing left to do */
        if (dp->dd_found)
                return;
+       (void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &guid);
 
        /*
         * Match by GUID if available otherwise fallback to devid or physical
         */
        if (dp->dd_vdev_guid != 0) {
-               uint64_t guid;
-
-               if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
-                   &guid) != 0 || guid != dp->dd_vdev_guid) {
+               if (guid != dp->dd_vdev_guid)
                        return;
-               }
                zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched on %llu", guid);
                dp->dd_found = B_TRUE;
 
@@ -592,6 +591,12 @@ zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
                 * illumos, substring matching is not required to accommodate
                 * the partition suffix. An exact match will be present in
                 * the dp->dd_compare value.
+                * If the attached disk already contains a vdev GUID, it means
+                * the disk is not clean. In such a scenario, the physical path
+                * would be a match that makes the disk faulted when trying to
+                * online it. So, we would only want to proceed if either GUID
+                * matches with the last attached disk or the disk is in clean
+                * state.
                 */
                if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
                    strcmp(dp->dd_compare, path) != 0) {
@@ -599,6 +604,12 @@ zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
                            __func__, dp->dd_compare, path);
                        return;
                }
+               if (dp->dd_new_vdev_guid != 0 && dp->dd_new_vdev_guid != guid) {
+                       zed_log_msg(LOG_INFO, "  %s: no match (GUID:%llu"
+                           " != vdev GUID:%llu)", __func__,
+                           dp->dd_new_vdev_guid, guid);
+                       return;
+               }
 
                zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched %s on %s",
                    dp->dd_prop, path);
@@ -680,7 +691,7 @@ zfs_iter_pool(zpool_handle_t *zhp, void *data)
  */
 static boolean_t
 devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
-    boolean_t is_slice)
+    boolean_t is_slice, uint64_t new_vdev_guid)
 {
        dev_data_t data = { 0 };
 
@@ -690,6 +701,7 @@ devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
        data.dd_found = B_FALSE;
        data.dd_islabeled = is_slice;
        data.dd_new_devid = devid;      /* used by auto replace code */
+       data.dd_new_vdev_guid = new_vdev_guid;
 
        (void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
 
@@ -858,7 +870,7 @@ zfs_deliver_add(nvlist_t *nvl)
        if (devid_iter(devid, zfs_process_add, is_slice))
                return (0);
        if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
-           is_slice))
+           is_slice, vdev_guid))
                return (0);
        if (vdev_guid != 0)
                (void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,
module/zfs/vdev.c
index 4520ca31b7d738afe8dadab4198ab12635721d84..891744261014c9fdf311cff93f8c2adb88aea778 100644 (file)
@@ -4269,9 +4269,9 @@ vdev_clear(spa_t *spa, vdev_t *vd)
                vdev_clear(spa, vd->vdev_child[c]);
 
        /*
-        * It makes no sense to "clear" an indirect vdev.
+        * It makes no sense to "clear" an indirect or removed vdev.
         */
-       if (!vdev_is_concrete(vd))
+       if (!vdev_is_concrete(vd) || vd->vdev_removed)
                return;
 
        /*